From c35f93bb0dad05c7fac0a563ccbfd509cc5e8c7a Mon Sep 17 00:00:00 2001
From: Hacl Bot <hacl-star@mailo.com>
Date: Thu, 2 Nov 2023 21:12:56 +0000
Subject: [PATCH] [CI] update code

---
 include/EverCrypt_Chacha20Poly1305.h          |    6 +-
 include/EverCrypt_HMAC.h                      |    5 +-
 include/EverCrypt_Hash.h                      |   64 +-
 include/EverCrypt_Poly1305.h                  |    8 +-
 ...1305_32.h => Hacl_AEAD_Chacha20Poly1305.h} |   70 +-
 include/Hacl_AEAD_Chacha20Poly1305_Simd128.h  |  104 ++
 include/Hacl_AEAD_Chacha20Poly1305_Simd256.h  |  104 ++
 include/Hacl_Chacha20Poly1305_128.h           |  107 --
 include/Hacl_Chacha20Poly1305_256.h           |  107 --
 include/Hacl_HMAC.h                           |    5 +-
 include/Hacl_HMAC_Blake2b_256.h               |    2 +-
 include/Hacl_HMAC_Blake2s_128.h               |    2 +-
 include/Hacl_HPKE_Curve51_CP128_SHA256.h      |    2 +-
 include/Hacl_HPKE_Curve51_CP128_SHA512.h      |    2 +-
 include/Hacl_HPKE_Curve51_CP256_SHA256.h      |    2 +-
 include/Hacl_HPKE_Curve51_CP256_SHA512.h      |    2 +-
 include/Hacl_HPKE_Curve51_CP32_SHA256.h       |    2 +-
 include/Hacl_HPKE_Curve51_CP32_SHA512.h       |    2 +-
 include/Hacl_HPKE_Curve64_CP128_SHA256.h      |    2 +-
 include/Hacl_HPKE_Curve64_CP128_SHA512.h      |    2 +-
 include/Hacl_HPKE_Curve64_CP256_SHA256.h      |    2 +-
 include/Hacl_HPKE_Curve64_CP256_SHA512.h      |    2 +-
 include/Hacl_HPKE_Curve64_CP32_SHA256.h       |    2 +-
 include/Hacl_HPKE_Curve64_CP32_SHA512.h       |    2 +-
 include/Hacl_HPKE_P256_CP128_SHA256.h         |    2 +-
 include/Hacl_HPKE_P256_CP256_SHA256.h         |    2 +-
 include/Hacl_HPKE_P256_CP32_SHA256.h          |    2 +-
 include/Hacl_Hash_Blake2.h                    |  155 --
 ...ming_Blake2b_256.h => Hacl_Hash_Blake2b.h} |   64 +-
 include/Hacl_Hash_Blake2b_Simd256.h           |  113 ++
 ...ming_Blake2s_128.h => Hacl_Hash_Blake2s.h} |   64 +-
 include/Hacl_Hash_Blake2s_Simd128.h           |  112 ++
 include/Hacl_Hash_MD5.h                       |   16 +-
 include/Hacl_Hash_SHA1.h                      |   16 +-
 include/Hacl_Hash_SHA2.h                      |  104 +-
 include/Hacl_Hash_SHA3.h                      |   50 +-
 include/Hacl_IntTypes_Intrinsics.h            |    9 +-
 include/Hacl_IntTypes_Intrinsics_128.h        |    7 +-
 ...ming_Poly1305_32.h => Hacl_MAC_Poly1305.h} |   33 +-
 ...1305_128.h => Hacl_MAC_Poly1305_Simd128.h} |   41 +-
 ...1305_256.h => Hacl_MAC_Poly1305_Simd256.h} |   41 +-
 include/Hacl_NaCl.h                           |    2 +-
 include/Hacl_Poly1305_256.h                   |   67 -
 include/Hacl_Poly1305_32.h                    |   57 -
 include/Hacl_Streaming_Blake2.h               |  147 --
 include/internal/EverCrypt_HMAC.h             |    4 +-
 include/internal/EverCrypt_Hash.h             |    6 +-
 include/internal/Hacl_Bignum25519_51.h        |  345 ++--
 include/internal/Hacl_Bignum_Base.h           |  292 ++--
 include/internal/Hacl_Bignum_K256.h           |  322 ++--
 include/internal/Hacl_Ed25519_PrecompTable.h  | 1086 ++++++------
 include/internal/Hacl_Frodo_KEM.h             |  357 ++--
 .../Hacl_HMAC.h}                              |   14 +-
 include/internal/Hacl_Hash_Blake2b.h          |   70 +
 .../Hacl_Hash_Blake2b_Simd256.h}              |   52 +-
 include/internal/Hacl_Hash_Blake2s.h          |   70 +
 .../Hacl_Hash_Blake2s_Simd128.h}              |   51 +-
 include/internal/Hacl_Hash_MD5.h              |   17 +-
 include/internal/Hacl_Hash_SHA1.h             |   17 +-
 include/internal/Hacl_Hash_SHA2.h             |  138 +-
 include/internal/Hacl_Hash_SHA3.h             |    4 +-
 include/internal/Hacl_Impl_Blake2_Constants.h |   50 +-
 include/internal/Hacl_Impl_FFDHE_Constants.h  |  723 +++-----
 include/internal/Hacl_K256_PrecompTable.h     |  816 ++++-----
 ...acl_Poly1305_128.h => Hacl_MAC_Poly1305.h} |   18 +-
 .../Hacl_MAC_Poly1305_Simd128.h}              |   23 +-
 .../Hacl_MAC_Poly1305_Simd256.h}              |   23 +-
 include/internal/Hacl_P256_PrecompTable.h     |  776 ++++-----
 include/internal/Hacl_SHA2_Types.h            |   56 +-
 include/msvc/EverCrypt_Chacha20Poly1305.h     |    6 +-
 include/msvc/EverCrypt_HMAC.h                 |    5 +-
 include/msvc/EverCrypt_Hash.h                 |   64 +-
 include/msvc/EverCrypt_Poly1305.h             |    8 +-
 .../Hacl_AEAD_Chacha20Poly1305.h}             |   70 +-
 .../msvc/Hacl_AEAD_Chacha20Poly1305_Simd128.h |  104 ++
 .../msvc/Hacl_AEAD_Chacha20Poly1305_Simd256.h |  104 ++
 include/msvc/Hacl_Chacha20Poly1305_128.h      |  107 --
 include/msvc/Hacl_Chacha20Poly1305_256.h      |  107 --
 include/msvc/Hacl_HMAC.h                      |    5 +-
 include/msvc/Hacl_HMAC_Blake2b_256.h          |    2 +-
 include/msvc/Hacl_HMAC_Blake2s_128.h          |    2 +-
 include/msvc/Hacl_HPKE_Curve51_CP128_SHA256.h |    2 +-
 include/msvc/Hacl_HPKE_Curve51_CP128_SHA512.h |    2 +-
 include/msvc/Hacl_HPKE_Curve51_CP256_SHA256.h |    2 +-
 include/msvc/Hacl_HPKE_Curve51_CP256_SHA512.h |    2 +-
 include/msvc/Hacl_HPKE_Curve51_CP32_SHA256.h  |    2 +-
 include/msvc/Hacl_HPKE_Curve51_CP32_SHA512.h  |    2 +-
 include/msvc/Hacl_HPKE_Curve64_CP128_SHA256.h |    2 +-
 include/msvc/Hacl_HPKE_Curve64_CP128_SHA512.h |    2 +-
 include/msvc/Hacl_HPKE_Curve64_CP256_SHA256.h |    2 +-
 include/msvc/Hacl_HPKE_Curve64_CP256_SHA512.h |    2 +-
 include/msvc/Hacl_HPKE_Curve64_CP32_SHA256.h  |    2 +-
 include/msvc/Hacl_HPKE_Curve64_CP32_SHA512.h  |    2 +-
 include/msvc/Hacl_HPKE_P256_CP128_SHA256.h    |    2 +-
 include/msvc/Hacl_HPKE_P256_CP256_SHA256.h    |    2 +-
 include/msvc/Hacl_HPKE_P256_CP32_SHA256.h     |    2 +-
 include/msvc/Hacl_Hash_Blake2.h               |  155 --
 ...ming_Blake2b_256.h => Hacl_Hash_Blake2b.h} |   64 +-
 include/msvc/Hacl_Hash_Blake2b_Simd256.h      |  113 ++
 ...ming_Blake2s_128.h => Hacl_Hash_Blake2s.h} |   64 +-
 include/msvc/Hacl_Hash_Blake2s_Simd128.h      |  112 ++
 include/msvc/Hacl_Hash_MD5.h                  |   16 +-
 include/msvc/Hacl_Hash_SHA1.h                 |   16 +-
 include/msvc/Hacl_Hash_SHA2.h                 |  104 +-
 include/msvc/Hacl_Hash_SHA3.h                 |   50 +-
 include/msvc/Hacl_IntTypes_Intrinsics.h       |    9 +-
 include/msvc/Hacl_IntTypes_Intrinsics_128.h   |    7 +-
 .../Hacl_MAC_Poly1305.h}                      |   33 +-
 ...1305_128.h => Hacl_MAC_Poly1305_Simd128.h} |   41 +-
 .../Hacl_MAC_Poly1305_Simd256.h}              |   41 +-
 include/msvc/Hacl_NaCl.h                      |    2 +-
 include/msvc/Hacl_Poly1305_128.h              |   67 -
 include/msvc/Hacl_Poly1305_32.h               |   57 -
 include/msvc/Hacl_Streaming_Blake2.h          |  147 --
 include/msvc/internal/EverCrypt_HMAC.h        |    4 +-
 include/msvc/internal/EverCrypt_Hash.h        |    6 +-
 include/msvc/internal/Hacl_Bignum25519_51.h   |  345 ++--
 include/msvc/internal/Hacl_Bignum_Base.h      |  292 ++--
 include/msvc/internal/Hacl_Bignum_K256.h      |  322 ++--
 .../msvc/internal/Hacl_Ed25519_PrecompTable.h | 1086 ++++++------
 include/msvc/internal/Hacl_Frodo_KEM.h        |  357 ++--
 .../internal/Hacl_HMAC.h}                     |   14 +-
 include/msvc/internal/Hacl_Hash_Blake2b.h     |   70 +
 .../Hacl_Hash_Blake2b_Simd256.h}              |   52 +-
 include/msvc/internal/Hacl_Hash_Blake2s.h     |   70 +
 .../internal/Hacl_Hash_Blake2s_Simd128.h}     |   51 +-
 include/msvc/internal/Hacl_Hash_MD5.h         |   17 +-
 include/msvc/internal/Hacl_Hash_SHA1.h        |   17 +-
 include/msvc/internal/Hacl_Hash_SHA2.h        |  138 +-
 include/msvc/internal/Hacl_Hash_SHA3.h        |    4 +-
 .../internal/Hacl_Impl_Blake2_Constants.h     |   50 +-
 .../msvc/internal/Hacl_Impl_FFDHE_Constants.h |  723 +++-----
 .../msvc/internal/Hacl_K256_PrecompTable.h    |  816 ++++-----
 .../internal/Hacl_MAC_Poly1305.h}             |   18 +-
 .../internal/Hacl_MAC_Poly1305_Simd128.h}     |   27 +-
 .../Hacl_MAC_Poly1305_Simd256.h}              |   27 +-
 .../msvc/internal/Hacl_P256_PrecompTable.h    |  776 ++++-----
 include/msvc/internal/Hacl_SHA2_Types.h       |   56 +-
 src/EverCrypt_AEAD.c                          | 1460 ++++++++---------
 src/EverCrypt_AutoConfig2.c                   |   32 +-
 src/EverCrypt_Chacha20Poly1305.c              |   28 +-
 src/EverCrypt_DRBG.c                          |  922 ++++++-----
 src/EverCrypt_HKDF.c                          |  156 +-
 src/EverCrypt_HMAC.c                          |  471 +++---
 src/EverCrypt_Hash.c                          |  787 ++++-----
 src/EverCrypt_Poly1305.c                      |   46 +-
 ...1305_32.c => Hacl_AEAD_Chacha20Poly1305.c} |  274 ++--
 ...c => Hacl_AEAD_Chacha20Poly1305_Simd128.c} |  460 +++---
 ...c => Hacl_AEAD_Chacha20Poly1305_Simd256.c} |  457 +++---
 src/Hacl_Bignum.c                             | 1268 +++++++-------
 src/Hacl_Bignum256.c                          |  968 ++++++-----
 src/Hacl_Bignum256_32.c                       | 1083 ++++++------
 src/Hacl_Bignum32.c                           |  280 ++--
 src/Hacl_Bignum4096.c                         |  856 +++++-----
 src/Hacl_Bignum4096_32.c                      |  802 +++++----
 src/Hacl_Bignum64.c                           |  280 ++--
 src/Hacl_Chacha20.c                           |   97 +-
 src/Hacl_Chacha20_Vec128.c                    |  189 ++-
 src/Hacl_Chacha20_Vec256.c                    |  192 ++-
 src/Hacl_Chacha20_Vec32.c                     |  184 +--
 src/Hacl_Curve25519_51.c                      |  197 ++-
 src/Hacl_Curve25519_64.c                      |  213 ++-
 src/Hacl_EC_Ed25519.c                         |   74 +-
 src/Hacl_EC_K256.c                            |   62 +-
 src/Hacl_Ed25519.c                            | 1217 +++++++-------
 src/Hacl_FFDHE.c                              |  112 +-
 src/Hacl_Frodo1344.c                          |  311 ++--
 src/Hacl_Frodo64.c                            |  305 ++--
 src/Hacl_Frodo640.c                           |  315 ++--
 src/Hacl_Frodo976.c                           |  311 ++--
 src/Hacl_Frodo_KEM.c                          |    2 +-
 src/Hacl_GenericField32.c                     |  199 ++-
 src/Hacl_GenericField64.c                     |  199 ++-
 src/Hacl_HKDF.c                               |  130 +-
 src/Hacl_HKDF_Blake2b_256.c                   |   34 +-
 src/Hacl_HKDF_Blake2s_128.c                   |   34 +-
 src/Hacl_HMAC.c                               |  476 +++---
 src/Hacl_HMAC_Blake2b_256.c                   |   86 +-
 src/Hacl_HMAC_Blake2s_128.c                   |   74 +-
 src/Hacl_HMAC_DRBG.c                          |  730 ++++-----
 src/Hacl_HPKE_Curve51_CP128_SHA256.c          |  753 ++++-----
 src/Hacl_HPKE_Curve51_CP128_SHA512.c          |  753 ++++-----
 src/Hacl_HPKE_Curve51_CP256_SHA256.c          |  753 ++++-----
 src/Hacl_HPKE_Curve51_CP256_SHA512.c          |  753 ++++-----
 src/Hacl_HPKE_Curve51_CP32_SHA256.c           |  753 ++++-----
 src/Hacl_HPKE_Curve51_CP32_SHA512.c           |  753 ++++-----
 src/Hacl_HPKE_Curve64_CP128_SHA256.c          |  753 ++++-----
 src/Hacl_HPKE_Curve64_CP128_SHA512.c          |  753 ++++-----
 src/Hacl_HPKE_Curve64_CP256_SHA256.c          |  753 ++++-----
 src/Hacl_HPKE_Curve64_CP256_SHA512.c          |  753 ++++-----
 src/Hacl_HPKE_Curve64_CP32_SHA256.c           |  753 ++++-----
 src/Hacl_HPKE_Curve64_CP32_SHA512.c           |  753 ++++-----
 src/Hacl_HPKE_P256_CP128_SHA256.c             |  761 ++++-----
 src/Hacl_HPKE_P256_CP256_SHA256.c             |  761 ++++-----
 src/Hacl_HPKE_P256_CP32_SHA256.c              |  761 ++++-----
 src/Hacl_Hash_Base.c                          |   76 +-
 src/Hacl_Hash_Blake2.c                        | 1324 ---------------
 src/Hacl_Hash_Blake2b.c                       |  971 +++++++++++
 src/Hacl_Hash_Blake2b_256.c                   |  499 ------
 src/Hacl_Hash_Blake2b_Simd256.c               |  828 ++++++++++
 src/Hacl_Hash_Blake2s.c                       |  931 +++++++++++
 src/Hacl_Hash_Blake2s_128.c                   |  491 ------
 src/Hacl_Hash_Blake2s_Simd128.c               |  794 +++++++++
 src/Hacl_Hash_MD5.c                           |  688 ++++----
 src/Hacl_Hash_SHA1.c                          |  339 ++--
 src/Hacl_Hash_SHA2.c                          |  932 +++++------
 src/Hacl_Hash_SHA3.c                          |  508 +++---
 src/Hacl_K256_ECDSA.c                         | 1335 ++++++++-------
 src/Hacl_MAC_Poly1305.c                       |  712 ++++++++
 ...1305_128.c => Hacl_MAC_Poly1305_Simd128.c} | 1006 ++++++------
 ...1305_256.c => Hacl_MAC_Poly1305_Simd256.c} | 1094 ++++++------
 src/Hacl_NaCl.c                               |   92 +-
 src/Hacl_P256.c                               | 1090 ++++++------
 src/Hacl_Poly1305_32.c                        |  572 -------
 src/Hacl_RSAPSS.c                             |  388 ++---
 src/Hacl_SHA2_Vec128.c                        |  384 ++---
 src/Hacl_SHA2_Vec256.c                        |  848 +++++-----
 src/Hacl_Salsa20.c                            |  302 ++--
 src/Hacl_Streaming_Blake2.c                   |  655 --------
 src/Hacl_Streaming_Blake2b_256.c              |  371 -----
 src/Hacl_Streaming_Blake2s_128.c              |  341 ----
 src/Hacl_Streaming_Poly1305_128.c             |  341 ----
 src/Hacl_Streaming_Poly1305_256.c             |  341 ----
 src/Hacl_Streaming_Poly1305_32.c              |  308 ----
 src/msvc/EverCrypt_AEAD.c                     | 1460 ++++++++---------
 src/msvc/EverCrypt_AutoConfig2.c              |   32 +-
 src/msvc/EverCrypt_Chacha20Poly1305.c         |   28 +-
 src/msvc/EverCrypt_DRBG.c                     |  922 ++++++-----
 src/msvc/EverCrypt_HKDF.c                     |  156 +-
 src/msvc/EverCrypt_HMAC.c                     |  471 +++---
 src/msvc/EverCrypt_Hash.c                     |  787 ++++-----
 src/msvc/EverCrypt_Poly1305.c                 |   46 +-
 ...1305_32.c => Hacl_AEAD_Chacha20Poly1305.c} |  274 ++--
 ...c => Hacl_AEAD_Chacha20Poly1305_Simd128.c} |  460 +++---
 .../Hacl_AEAD_Chacha20Poly1305_Simd256.c}     |  457 +++---
 src/msvc/Hacl_Bignum.c                        | 1268 +++++++-------
 src/msvc/Hacl_Bignum256.c                     |  968 ++++++-----
 src/msvc/Hacl_Bignum256_32.c                  | 1083 ++++++------
 src/msvc/Hacl_Bignum32.c                      |  280 ++--
 src/msvc/Hacl_Bignum4096.c                    |  856 +++++-----
 src/msvc/Hacl_Bignum4096_32.c                 |  802 +++++----
 src/msvc/Hacl_Bignum64.c                      |  280 ++--
 src/msvc/Hacl_Chacha20.c                      |   97 +-
 src/msvc/Hacl_Chacha20_Vec128.c               |  189 ++-
 src/msvc/Hacl_Chacha20_Vec256.c               |  192 ++-
 src/msvc/Hacl_Chacha20_Vec32.c                |  184 +--
 src/msvc/Hacl_Curve25519_51.c                 |  197 ++-
 src/msvc/Hacl_Curve25519_64.c                 |  213 ++-
 src/msvc/Hacl_EC_Ed25519.c                    |   74 +-
 src/msvc/Hacl_EC_K256.c                       |   62 +-
 src/msvc/Hacl_Ed25519.c                       | 1217 +++++++-------
 src/msvc/Hacl_FFDHE.c                         |  112 +-
 src/msvc/Hacl_Frodo1344.c                     |  311 ++--
 src/msvc/Hacl_Frodo64.c                       |  305 ++--
 src/msvc/Hacl_Frodo640.c                      |  315 ++--
 src/msvc/Hacl_Frodo976.c                      |  311 ++--
 src/msvc/Hacl_Frodo_KEM.c                     |    2 +-
 src/msvc/Hacl_GenericField32.c                |  199 ++-
 src/msvc/Hacl_GenericField64.c                |  199 ++-
 src/msvc/Hacl_HKDF.c                          |  130 +-
 src/msvc/Hacl_HKDF_Blake2b_256.c              |   34 +-
 src/msvc/Hacl_HKDF_Blake2s_128.c              |   34 +-
 src/msvc/Hacl_HMAC.c                          |  476 +++---
 src/msvc/Hacl_HMAC_Blake2b_256.c              |   86 +-
 src/msvc/Hacl_HMAC_Blake2s_128.c              |   74 +-
 src/msvc/Hacl_HMAC_DRBG.c                     |  730 ++++-----
 src/msvc/Hacl_HPKE_Curve51_CP128_SHA256.c     |  753 ++++-----
 src/msvc/Hacl_HPKE_Curve51_CP128_SHA512.c     |  753 ++++-----
 src/msvc/Hacl_HPKE_Curve51_CP256_SHA256.c     |  753 ++++-----
 src/msvc/Hacl_HPKE_Curve51_CP256_SHA512.c     |  753 ++++-----
 src/msvc/Hacl_HPKE_Curve51_CP32_SHA256.c      |  753 ++++-----
 src/msvc/Hacl_HPKE_Curve51_CP32_SHA512.c      |  753 ++++-----
 src/msvc/Hacl_HPKE_Curve64_CP128_SHA256.c     |  753 ++++-----
 src/msvc/Hacl_HPKE_Curve64_CP128_SHA512.c     |  753 ++++-----
 src/msvc/Hacl_HPKE_Curve64_CP256_SHA256.c     |  753 ++++-----
 src/msvc/Hacl_HPKE_Curve64_CP256_SHA512.c     |  753 ++++-----
 src/msvc/Hacl_HPKE_Curve64_CP32_SHA256.c      |  753 ++++-----
 src/msvc/Hacl_HPKE_Curve64_CP32_SHA512.c      |  753 ++++-----
 src/msvc/Hacl_HPKE_P256_CP128_SHA256.c        |  761 ++++-----
 src/msvc/Hacl_HPKE_P256_CP256_SHA256.c        |  761 ++++-----
 src/msvc/Hacl_HPKE_P256_CP32_SHA256.c         |  761 ++++-----
 src/msvc/Hacl_Hash_Base.c                     |   76 +-
 src/msvc/Hacl_Hash_Blake2.c                   | 1324 ---------------
 src/msvc/Hacl_Hash_Blake2b.c                  |  971 +++++++++++
 src/msvc/Hacl_Hash_Blake2b_256.c              |  499 ------
 src/msvc/Hacl_Hash_Blake2b_Simd256.c          |  828 ++++++++++
 src/msvc/Hacl_Hash_Blake2s.c                  |  931 +++++++++++
 src/msvc/Hacl_Hash_Blake2s_128.c              |  491 ------
 src/msvc/Hacl_Hash_Blake2s_Simd128.c          |  794 +++++++++
 src/msvc/Hacl_Hash_MD5.c                      |  688 ++++----
 src/msvc/Hacl_Hash_SHA1.c                     |  339 ++--
 src/msvc/Hacl_Hash_SHA2.c                     |  932 +++++------
 src/msvc/Hacl_Hash_SHA3.c                     |  508 +++---
 src/msvc/Hacl_K256_ECDSA.c                    | 1335 ++++++++-------
 src/msvc/Hacl_MAC_Poly1305.c                  |  712 ++++++++
 .../Hacl_MAC_Poly1305_Simd128.c}              | 1006 ++++++------
 .../Hacl_MAC_Poly1305_Simd256.c}              | 1094 ++++++------
 src/msvc/Hacl_NaCl.c                          |   92 +-
 src/msvc/Hacl_P256.c                          | 1090 ++++++------
 src/msvc/Hacl_Poly1305_32.c                   |  572 -------
 src/msvc/Hacl_RSAPSS.c                        |  390 ++---
 src/msvc/Hacl_SHA2_Vec128.c                   |  384 ++---
 src/msvc/Hacl_SHA2_Vec256.c                   |  848 +++++-----
 src/msvc/Hacl_Salsa20.c                       |  302 ++--
 src/msvc/Hacl_Streaming_Blake2.c              |  655 --------
 src/msvc/Hacl_Streaming_Blake2b_256.c         |  371 -----
 src/msvc/Hacl_Streaming_Blake2s_128.c         |  341 ----
 src/msvc/Hacl_Streaming_Poly1305_128.c        |  341 ----
 src/msvc/Hacl_Streaming_Poly1305_256.c        |  341 ----
 src/msvc/Hacl_Streaming_Poly1305_32.c         |  308 ----
 src/wasm/EverCrypt_Hash.wasm                  |  Bin 49373 -> 49305 bytes
 ...2.wasm => Hacl_AEAD_Chacha20Poly1305.wasm} |  Bin 7657 -> 7653 bytes
 .../Hacl_AEAD_Chacha20Poly1305_Simd128.wasm   |  Bin 0 -> 1910 bytes
 .../Hacl_AEAD_Chacha20Poly1305_Simd256.wasm   |  Bin 0 -> 1910 bytes
 ..._Chacha20_Vec128_Hacl_Chacha20_Vec256.wasm |  Bin 0 -> 1845 bytes
 src/wasm/Hacl_Ed25519.wasm                    |  Bin 77696 -> 77669 bytes
 src/wasm/Hacl_HMAC.wasm                       |  Bin 29855 -> 29754 bytes
 src/wasm/Hacl_HMAC_Blake2b_256.wasm           |  Bin 1491 -> 1510 bytes
 src/wasm/Hacl_HMAC_Blake2s_128.wasm           |  Bin 1489 -> 1508 bytes
 src/wasm/Hacl_HMAC_DRBG.wasm                  |  Bin 25403 -> 25396 bytes
 src/wasm/Hacl_HPKE_Curve51_CP32_SHA256.wasm   |  Bin 21306 -> 21320 bytes
 src/wasm/Hacl_HPKE_Curve51_CP32_SHA512.wasm   |  Bin 21434 -> 21448 bytes
 src/wasm/Hacl_Hash_Blake2.wasm                |  Bin 20644 -> 0 bytes
 src/wasm/Hacl_Hash_Blake2b.wasm               |  Bin 0 -> 15858 bytes
 src/wasm/Hacl_Hash_Blake2b_256.wasm           |  Bin 4552 -> 0 bytes
 src/wasm/Hacl_Hash_Blake2b_Simd256.wasm       |  Bin 0 -> 6794 bytes
 src/wasm/Hacl_Hash_Blake2s.wasm               |  Bin 0 -> 14005 bytes
 src/wasm/Hacl_Hash_Blake2s_128.wasm           |  Bin 3629 -> 0 bytes
 src/wasm/Hacl_Hash_Blake2s_Simd128.wasm       |  Bin 0 -> 5638 bytes
 src/wasm/Hacl_Hash_MD5.wasm                   |  Bin 15558 -> 15447 bytes
 src/wasm/Hacl_Hash_SHA1.wasm                  |  Bin 13148 -> 13044 bytes
 src/wasm/Hacl_Hash_SHA2.wasm                  |  Bin 23682 -> 23468 bytes
 src/wasm/Hacl_Hash_SHA3.wasm                  |  Bin 17611 -> 17565 bytes
 src/wasm/Hacl_Impl_Blake2_Constants.wasm      |  Bin 1544 -> 1517 bytes
 src/wasm/Hacl_K256_ECDSA.wasm                 |  Bin 98193 -> 98188 bytes
 src/wasm/Hacl_MAC_Poly1305.wasm               |  Bin 0 -> 9539 bytes
 src/wasm/Hacl_NaCl.wasm                       |  Bin 5027 -> 5020 bytes
 src/wasm/Hacl_P256.wasm                       |  Bin 83213 -> 83198 bytes
 ..._Hacl_Poly1305_256_Hacl_Impl_Poly1305.wasm |  Bin 0 -> 1993 bytes
 src/wasm/Hacl_Poly1305_32.wasm                |  Bin 6511 -> 0 bytes
 src/wasm/Hacl_Streaming_Blake2.wasm           |  Bin 12128 -> 0 bytes
 src/wasm/Hacl_Streaming_Blake2b_256.wasm      |  Bin 4495 -> 0 bytes
 src/wasm/Hacl_Streaming_Blake2s_128.wasm      |  Bin 4260 -> 0 bytes
 src/wasm/Hacl_Streaming_Poly1305_32.wasm      |  Bin 5365 -> 0 bytes
 src/wasm/INFO.txt                             |    4 +-
 src/wasm/layouts.json                         |    2 +-
 src/wasm/main.html                            |    2 +-
 src/wasm/shell.js                             |    2 +-
 348 files changed, 49186 insertions(+), 57424 deletions(-)
 rename include/{msvc/Hacl_Chacha20Poly1305_32.h => Hacl_AEAD_Chacha20Poly1305.h} (67%)
 create mode 100644 include/Hacl_AEAD_Chacha20Poly1305_Simd128.h
 create mode 100644 include/Hacl_AEAD_Chacha20Poly1305_Simd256.h
 delete mode 100644 include/Hacl_Chacha20Poly1305_128.h
 delete mode 100644 include/Hacl_Chacha20Poly1305_256.h
 delete mode 100644 include/Hacl_Hash_Blake2.h
 rename include/{Hacl_Streaming_Blake2b_256.h => Hacl_Hash_Blake2b.h} (56%)
 create mode 100644 include/Hacl_Hash_Blake2b_Simd256.h
 rename include/{Hacl_Streaming_Blake2s_128.h => Hacl_Hash_Blake2s.h} (56%)
 create mode 100644 include/Hacl_Hash_Blake2s_Simd128.h
 rename include/{msvc/Hacl_Streaming_Poly1305_32.h => Hacl_MAC_Poly1305.h} (67%)
 rename include/{Hacl_Streaming_Poly1305_128.h => Hacl_MAC_Poly1305_Simd128.h} (67%)
 rename include/{msvc/Hacl_Streaming_Poly1305_256.h => Hacl_MAC_Poly1305_Simd256.h} (67%)
 delete mode 100644 include/Hacl_Poly1305_256.h
 delete mode 100644 include/Hacl_Poly1305_32.h
 delete mode 100644 include/Hacl_Streaming_Blake2.h
 rename include/{msvc/internal/Hacl_Hash_Blake2.h => internal/Hacl_HMAC.h} (82%)
 create mode 100644 include/internal/Hacl_Hash_Blake2b.h
 rename include/{Hacl_Hash_Blake2b_256.h => internal/Hacl_Hash_Blake2b_Simd256.h} (61%)
 create mode 100644 include/internal/Hacl_Hash_Blake2s.h
 rename include/{msvc/Hacl_Hash_Blake2s_128.h => internal/Hacl_Hash_Blake2s_Simd128.h} (61%)
 rename include/internal/{Hacl_Poly1305_128.h => Hacl_MAC_Poly1305.h} (77%)
 rename include/{msvc/internal/Hacl_Poly1305_128.h => internal/Hacl_MAC_Poly1305_Simd128.h} (73%)
 rename include/{msvc/internal/Hacl_Poly1305_256.h => internal/Hacl_MAC_Poly1305_Simd256.h} (73%)
 rename include/{Hacl_Chacha20Poly1305_32.h => msvc/Hacl_AEAD_Chacha20Poly1305.h} (67%)
 create mode 100644 include/msvc/Hacl_AEAD_Chacha20Poly1305_Simd128.h
 create mode 100644 include/msvc/Hacl_AEAD_Chacha20Poly1305_Simd256.h
 delete mode 100644 include/msvc/Hacl_Chacha20Poly1305_128.h
 delete mode 100644 include/msvc/Hacl_Chacha20Poly1305_256.h
 delete mode 100644 include/msvc/Hacl_Hash_Blake2.h
 rename include/msvc/{Hacl_Streaming_Blake2b_256.h => Hacl_Hash_Blake2b.h} (56%)
 create mode 100644 include/msvc/Hacl_Hash_Blake2b_Simd256.h
 rename include/msvc/{Hacl_Streaming_Blake2s_128.h => Hacl_Hash_Blake2s.h} (56%)
 create mode 100644 include/msvc/Hacl_Hash_Blake2s_Simd128.h
 rename include/{Hacl_Streaming_Poly1305_32.h => msvc/Hacl_MAC_Poly1305.h} (67%)
 rename include/msvc/{Hacl_Streaming_Poly1305_128.h => Hacl_MAC_Poly1305_Simd128.h} (67%)
 rename include/{Hacl_Streaming_Poly1305_256.h => msvc/Hacl_MAC_Poly1305_Simd256.h} (67%)
 delete mode 100644 include/msvc/Hacl_Poly1305_128.h
 delete mode 100644 include/msvc/Hacl_Poly1305_32.h
 delete mode 100644 include/msvc/Hacl_Streaming_Blake2.h
 rename include/{internal/Hacl_Hash_Blake2.h => msvc/internal/Hacl_HMAC.h} (82%)
 create mode 100644 include/msvc/internal/Hacl_Hash_Blake2b.h
 rename include/msvc/{Hacl_Hash_Blake2b_256.h => internal/Hacl_Hash_Blake2b_Simd256.h} (61%)
 create mode 100644 include/msvc/internal/Hacl_Hash_Blake2s.h
 rename include/{Hacl_Hash_Blake2s_128.h => msvc/internal/Hacl_Hash_Blake2s_Simd128.h} (61%)
 rename include/{internal/Hacl_Poly1305_256.h => msvc/internal/Hacl_MAC_Poly1305.h} (77%)
 rename include/{Hacl_Poly1305_128.h => msvc/internal/Hacl_MAC_Poly1305_Simd128.h} (72%)
 rename include/msvc/{Hacl_Poly1305_256.h => internal/Hacl_MAC_Poly1305_Simd256.h} (72%)
 rename src/{Hacl_Chacha20Poly1305_32.c => Hacl_AEAD_Chacha20Poly1305.c} (70%)
 rename src/{Hacl_Chacha20Poly1305_128.c => Hacl_AEAD_Chacha20Poly1305_Simd128.c} (77%)
 rename src/{msvc/Hacl_Chacha20Poly1305_256.c => Hacl_AEAD_Chacha20Poly1305_Simd256.c} (77%)
 delete mode 100644 src/Hacl_Hash_Blake2.c
 create mode 100644 src/Hacl_Hash_Blake2b.c
 delete mode 100644 src/Hacl_Hash_Blake2b_256.c
 create mode 100644 src/Hacl_Hash_Blake2b_Simd256.c
 create mode 100644 src/Hacl_Hash_Blake2s.c
 delete mode 100644 src/Hacl_Hash_Blake2s_128.c
 create mode 100644 src/Hacl_Hash_Blake2s_Simd128.c
 create mode 100644 src/Hacl_MAC_Poly1305.c
 rename src/{msvc/Hacl_Poly1305_128.c => Hacl_MAC_Poly1305_Simd128.c} (66%)
 rename src/{msvc/Hacl_Poly1305_256.c => Hacl_MAC_Poly1305_Simd256.c} (71%)
 delete mode 100644 src/Hacl_Poly1305_32.c
 delete mode 100644 src/Hacl_Streaming_Blake2.c
 delete mode 100644 src/Hacl_Streaming_Blake2b_256.c
 delete mode 100644 src/Hacl_Streaming_Blake2s_128.c
 delete mode 100644 src/Hacl_Streaming_Poly1305_128.c
 delete mode 100644 src/Hacl_Streaming_Poly1305_256.c
 delete mode 100644 src/Hacl_Streaming_Poly1305_32.c
 rename src/msvc/{Hacl_Chacha20Poly1305_32.c => Hacl_AEAD_Chacha20Poly1305.c} (70%)
 rename src/msvc/{Hacl_Chacha20Poly1305_128.c => Hacl_AEAD_Chacha20Poly1305_Simd128.c} (77%)
 rename src/{Hacl_Chacha20Poly1305_256.c => msvc/Hacl_AEAD_Chacha20Poly1305_Simd256.c} (77%)
 delete mode 100644 src/msvc/Hacl_Hash_Blake2.c
 create mode 100644 src/msvc/Hacl_Hash_Blake2b.c
 delete mode 100644 src/msvc/Hacl_Hash_Blake2b_256.c
 create mode 100644 src/msvc/Hacl_Hash_Blake2b_Simd256.c
 create mode 100644 src/msvc/Hacl_Hash_Blake2s.c
 delete mode 100644 src/msvc/Hacl_Hash_Blake2s_128.c
 create mode 100644 src/msvc/Hacl_Hash_Blake2s_Simd128.c
 create mode 100644 src/msvc/Hacl_MAC_Poly1305.c
 rename src/{Hacl_Poly1305_128.c => msvc/Hacl_MAC_Poly1305_Simd128.c} (66%)
 rename src/{Hacl_Poly1305_256.c => msvc/Hacl_MAC_Poly1305_Simd256.c} (71%)
 delete mode 100644 src/msvc/Hacl_Poly1305_32.c
 delete mode 100644 src/msvc/Hacl_Streaming_Blake2.c
 delete mode 100644 src/msvc/Hacl_Streaming_Blake2b_256.c
 delete mode 100644 src/msvc/Hacl_Streaming_Blake2s_128.c
 delete mode 100644 src/msvc/Hacl_Streaming_Poly1305_128.c
 delete mode 100644 src/msvc/Hacl_Streaming_Poly1305_256.c
 delete mode 100644 src/msvc/Hacl_Streaming_Poly1305_32.c
 rename src/wasm/{Hacl_Chacha20Poly1305_32.wasm => Hacl_AEAD_Chacha20Poly1305.wasm} (78%)
 create mode 100644 src/wasm/Hacl_AEAD_Chacha20Poly1305_Simd128.wasm
 create mode 100644 src/wasm/Hacl_AEAD_Chacha20Poly1305_Simd256.wasm
 create mode 100644 src/wasm/Hacl_Chacha20_Vec128_Hacl_Chacha20_Vec256.wasm
 delete mode 100644 src/wasm/Hacl_Hash_Blake2.wasm
 create mode 100644 src/wasm/Hacl_Hash_Blake2b.wasm
 delete mode 100644 src/wasm/Hacl_Hash_Blake2b_256.wasm
 create mode 100644 src/wasm/Hacl_Hash_Blake2b_Simd256.wasm
 create mode 100644 src/wasm/Hacl_Hash_Blake2s.wasm
 delete mode 100644 src/wasm/Hacl_Hash_Blake2s_128.wasm
 create mode 100644 src/wasm/Hacl_Hash_Blake2s_Simd128.wasm
 create mode 100644 src/wasm/Hacl_MAC_Poly1305.wasm
 create mode 100644 src/wasm/Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305.wasm
 delete mode 100644 src/wasm/Hacl_Poly1305_32.wasm
 delete mode 100644 src/wasm/Hacl_Streaming_Blake2.wasm
 delete mode 100644 src/wasm/Hacl_Streaming_Blake2b_256.wasm
 delete mode 100644 src/wasm/Hacl_Streaming_Blake2s_128.wasm
 delete mode 100644 src/wasm/Hacl_Streaming_Poly1305_32.wasm

diff --git a/include/EverCrypt_Chacha20Poly1305.h b/include/EverCrypt_Chacha20Poly1305.h
index c3eb2655..bd59e48b 100644
--- a/include/EverCrypt_Chacha20Poly1305.h
+++ b/include/EverCrypt_Chacha20Poly1305.h
@@ -35,9 +35,9 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "Hacl_Chacha20Poly1305_32.h"
-#include "Hacl_Chacha20Poly1305_256.h"
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 #include "EverCrypt_AutoConfig2.h"
 
 void
diff --git a/include/EverCrypt_HMAC.h b/include/EverCrypt_HMAC.h
index 6c64a37f..7d1da14d 100644
--- a/include/EverCrypt_HMAC.h
+++ b/include/EverCrypt_HMAC.h
@@ -38,13 +38,14 @@ extern "C" {
 #include "Hacl_Streaming_Types.h"
 #include "Hacl_Krmllib.h"
 #include "Hacl_Hash_SHA2.h"
-#include "Hacl_Hash_Blake2.h"
+#include "Hacl_Hash_Blake2s.h"
+#include "Hacl_Hash_Blake2b.h"
 
 bool EverCrypt_HMAC_is_supported_alg(Spec_Hash_Definitions_hash_alg uu___);
 
 typedef Spec_Hash_Definitions_hash_alg EverCrypt_HMAC_supported_alg;
 
-extern void (*EverCrypt_HMAC_hash_256)(uint8_t *x0, uint32_t x1, uint8_t *x2);
+extern void (*EverCrypt_HMAC_hash_256)(uint8_t *x0, uint8_t *x1, uint32_t x2);
 
 void
 EverCrypt_HMAC_compute(
diff --git a/include/EverCrypt_Hash.h b/include/EverCrypt_Hash.h
index 6791dc27..b35dcf5f 100644
--- a/include/EverCrypt_Hash.h
+++ b/include/EverCrypt_Hash.h
@@ -39,9 +39,10 @@ extern "C" {
 #include "Hacl_Krmllib.h"
 #include "Hacl_Hash_SHA3.h"
 #include "Hacl_Hash_SHA2.h"
-#include "Hacl_Hash_Blake2s_128.h"
-#include "Hacl_Hash_Blake2b_256.h"
-#include "Hacl_Hash_Blake2.h"
+#include "Hacl_Hash_Blake2s_Simd128.h"
+#include "Hacl_Hash_Blake2s.h"
+#include "Hacl_Hash_Blake2b_Simd256.h"
+#include "Hacl_Hash_Blake2b.h"
 #include "EverCrypt_Error.h"
 #include "EverCrypt_AutoConfig2.h"
 
@@ -49,13 +50,13 @@ typedef struct EverCrypt_Hash_state_s_s EverCrypt_Hash_state_s;
 
 uint32_t EverCrypt_Hash_Incremental_hash_len(Spec_Hash_Definitions_hash_alg a);
 
-typedef struct EverCrypt_Hash_Incremental_hash_state_s
+typedef struct EverCrypt_Hash_Incremental_state_t_s
 {
   EverCrypt_Hash_state_s *block_state;
   uint8_t *buf;
   uint64_t total_len;
 }
-EverCrypt_Hash_Incremental_hash_state;
+EverCrypt_Hash_Incremental_state_t;
 
 /**
 Allocate initial state for the agile hash. The argument `a` stands for the
@@ -63,13 +64,13 @@ choice of algorithm (see Hacl_Spec.h). This API will automatically pick the most
 efficient implementation, provided you have called EverCrypt_AutoConfig2_init()
 before. The state is to be freed by calling `free`.
 */
-EverCrypt_Hash_Incremental_hash_state
-*EverCrypt_Hash_Incremental_create_in(Spec_Hash_Definitions_hash_alg a);
+EverCrypt_Hash_Incremental_state_t
+*EverCrypt_Hash_Incremental_malloc(Spec_Hash_Definitions_hash_alg a);
 
 /**
 Reset an existing state to the initial hash state with empty data.
 */
-void EverCrypt_Hash_Incremental_init(EverCrypt_Hash_Incremental_hash_state *s);
+void EverCrypt_Hash_Incremental_reset(EverCrypt_Hash_Incremental_state_t *state);
 
 /**
 Feed an arbitrary amount of data into the hash. This function returns
@@ -80,34 +81,35 @@ algorithm. Both limits are unlikely to be attained in practice.
 */
 EverCrypt_Error_error_code
 EverCrypt_Hash_Incremental_update(
-  EverCrypt_Hash_Incremental_hash_state *s,
-  uint8_t *data,
-  uint32_t len
+  EverCrypt_Hash_Incremental_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
 );
 
 /**
 Perform a run-time test to determine which algorithm was chosen for the given piece of state.
 */
 Spec_Hash_Definitions_hash_alg
-EverCrypt_Hash_Incremental_alg_of_state(EverCrypt_Hash_Incremental_hash_state *s);
+EverCrypt_Hash_Incremental_alg_of_state(EverCrypt_Hash_Incremental_state_t *s);
 
 /**
-Write the resulting hash into `dst`, an array whose length is
+Write the resulting hash into `output`, an array whose length is
 algorithm-specific. You can use the macros defined earlier in this file to
 allocate a destination buffer of the right length. The state remains valid after
-a call to `finish`, meaning the user may feed more data into the hash via
+a call to `digest`, meaning the user may feed more data into the hash via
 `update`. (The finish function operates on an internal copy of the state and
 therefore does not invalidate the client-held state.)
 */
-void EverCrypt_Hash_Incremental_finish(EverCrypt_Hash_Incremental_hash_state *s, uint8_t *dst);
+void
+EverCrypt_Hash_Incremental_digest(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output);
 
 /**
 Free a state previously allocated with `create_in`.
 */
-void EverCrypt_Hash_Incremental_free(EverCrypt_Hash_Incremental_hash_state *s);
+void EverCrypt_Hash_Incremental_free(EverCrypt_Hash_Incremental_state_t *state);
 
 /**
-Hash `input`, of len `len`, into `dst`, an array whose length is determined by
+Hash `input`, of len `input_len`, into `output`, an array whose length is determined by
 your choice of algorithm `a` (see Hacl_Spec.h). You can use the macros defined
 earlier in this file to allocate a destination buffer of the right length. This
 API will automatically pick the most efficient implementation, provided you have
@@ -116,34 +118,34 @@ called EverCrypt_AutoConfig2_init() before.
 void
 EverCrypt_Hash_Incremental_hash(
   Spec_Hash_Definitions_hash_alg a,
-  uint8_t *dst,
+  uint8_t *output,
   uint8_t *input,
-  uint32_t len
+  uint32_t input_len
 );
 
-#define MD5_HASH_LEN ((uint32_t)16U)
+#define MD5_HASH_LEN (16U)
 
-#define SHA1_HASH_LEN ((uint32_t)20U)
+#define SHA1_HASH_LEN (20U)
 
-#define SHA2_224_HASH_LEN ((uint32_t)28U)
+#define SHA2_224_HASH_LEN (28U)
 
-#define SHA2_256_HASH_LEN ((uint32_t)32U)
+#define SHA2_256_HASH_LEN (32U)
 
-#define SHA2_384_HASH_LEN ((uint32_t)48U)
+#define SHA2_384_HASH_LEN (48U)
 
-#define SHA2_512_HASH_LEN ((uint32_t)64U)
+#define SHA2_512_HASH_LEN (64U)
 
-#define SHA3_224_HASH_LEN ((uint32_t)28U)
+#define SHA3_224_HASH_LEN (28U)
 
-#define SHA3_256_HASH_LEN ((uint32_t)32U)
+#define SHA3_256_HASH_LEN (32U)
 
-#define SHA3_384_HASH_LEN ((uint32_t)48U)
+#define SHA3_384_HASH_LEN (48U)
 
-#define SHA3_512_HASH_LEN ((uint32_t)64U)
+#define SHA3_512_HASH_LEN (64U)
 
-#define BLAKE2S_HASH_LEN ((uint32_t)32U)
+#define BLAKE2S_HASH_LEN (32U)
 
-#define BLAKE2B_HASH_LEN ((uint32_t)64U)
+#define BLAKE2B_HASH_LEN (64U)
 
 #if defined(__cplusplus)
 }
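
For orientation, here is a minimal sketch of driving the renamed incremental API end to end (allocate, reset, update, digest, free). The algorithm constant `Spec_Hash_Definitions_SHA2_256`, the `hash_example` wrapper, and the three-byte message are illustrative assumptions; only the function and type names come from the header above.

#include "EverCrypt_AutoConfig2.h"
#include "EverCrypt_Hash.h"

/* Illustrative caller of the renamed streaming interface. */
static void hash_example(void)
{
  uint8_t digest[SHA2_256_HASH_LEN];   /* 32 bytes, per the macro above */
  uint8_t msg[3] = { 'a', 'b', 'c' };

  EverCrypt_AutoConfig2_init();        /* let EverCrypt pick the best implementation */

  EverCrypt_Hash_Incremental_state_t
  *st = EverCrypt_Hash_Incremental_malloc(Spec_Hash_Definitions_SHA2_256);
  EverCrypt_Hash_Incremental_reset(st);            /* start from the empty state */
  EverCrypt_Hash_Incremental_update(st, msg, 3U);  /* returns an error code if limits are exceeded */
  EverCrypt_Hash_Incremental_digest(st, digest);   /* the state remains valid afterwards */
  EverCrypt_Hash_Incremental_free(st);
}
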
diff --git a/include/EverCrypt_Poly1305.h b/include/EverCrypt_Poly1305.h
index 62c00764..fba04059 100644
--- a/include/EverCrypt_Poly1305.h
+++ b/include/EverCrypt_Poly1305.h
@@ -35,12 +35,12 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "Hacl_Poly1305_32.h"
-#include "Hacl_Poly1305_256.h"
-#include "Hacl_Poly1305_128.h"
+#include "Hacl_MAC_Poly1305_Simd256.h"
+#include "Hacl_MAC_Poly1305_Simd128.h"
+#include "Hacl_MAC_Poly1305.h"
 #include "EverCrypt_AutoConfig2.h"
 
-void EverCrypt_Poly1305_poly1305(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key);
+void EverCrypt_Poly1305_mac(uint8_t *output, uint8_t *input, uint32_t input_len, uint8_t *key);
 
 #if defined(__cplusplus)
 }
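
A one-shot call to the renamed Poly1305 entry point might look as follows; the 32-byte one-time key and 16-byte tag sizes are the standard Poly1305 parameters rather than something restated in this hunk, and `poly1305_example` is a hypothetical wrapper.

#include "EverCrypt_Poly1305.h"

/* Hypothetical wrapper; `key` must point to 32 bytes, `tag` receives 16 bytes. */
static void poly1305_example(uint8_t *key, uint8_t *msg, uint32_t msg_len)
{
  uint8_t tag[16];
  EverCrypt_Poly1305_mac(tag, msg, msg_len, key);
}
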
diff --git a/include/msvc/Hacl_Chacha20Poly1305_32.h b/include/Hacl_AEAD_Chacha20Poly1305.h
similarity index 67%
rename from include/msvc/Hacl_Chacha20Poly1305_32.h
rename to include/Hacl_AEAD_Chacha20Poly1305.h
index 624e29fb..d20f0554 100644
--- a/include/msvc/Hacl_Chacha20Poly1305_32.h
+++ b/include/Hacl_AEAD_Chacha20Poly1305.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Chacha20Poly1305_32_H
-#define __Hacl_Chacha20Poly1305_32_H
+#ifndef __Hacl_AEAD_Chacha20Poly1305_H
+#define __Hacl_AEAD_Chacha20Poly1305_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,35 +35,33 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "Hacl_Poly1305_32.h"
 #include "Hacl_Chacha20.h"
 
 /**
-Encrypt a message `m` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
-
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the message.
-@param m Pointer to `mlen` bytes of memory where the message is read from.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is written to.
-@param mac Pointer to 16 bytes of memory where the mac is written to.
+Encrypt a message `input` with key `key`.
+
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
+
+@param output Pointer to `input_len` bytes of memory where the ciphertext is written to.
+@param tag Pointer to 16 bytes of memory where the mac is written to.
+@param input Pointer to `input_len` bytes of memory where the message is read from.
+@param input_len Length of the message.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
 */
 void
-Hacl_Chacha20Poly1305_32_aead_encrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_encrypt(
+  uint8_t *output,
+  uint8_t *tag,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce
 );
 
 /**
@@ -88,20 +86,20 @@ If decryption fails, the array `m` remains unchanged and the function returns th
 @returns 0 on succeess; 1 on failure.
 */
 uint32_t
-Hacl_Chacha20Poly1305_32_aead_decrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_decrypt(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce,
+  uint8_t *tag
 );
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Chacha20Poly1305_32_H_DEFINED
+#define __Hacl_AEAD_Chacha20Poly1305_H_DEFINED
 #endif
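
To make the new argument order concrete (outputs first, then inputs, then key and nonce), here is a hedged round-trip sketch against the declarations above; the buffer lengths and zeroed contents are arbitrary illustrative choices, not part of this patch.

#include <string.h>
#include "Hacl_AEAD_Chacha20Poly1305.h"

/* Illustrative encrypt/decrypt round trip with the portable AEAD API. */
static int aead_roundtrip(void)
{
  uint8_t key[32] = { 0 };    /* AEAD key                */
  uint8_t nonce[12] = { 0 };  /* AEAD nonce              */
  uint8_t aad[4] = { 0 };     /* associated data         */
  uint8_t msg[8] = { 0 };     /* plaintext               */
  uint8_t ct[8];              /* ciphertext, same length */
  uint8_t tag[16];            /* authentication tag      */
  uint8_t pt[8];              /* decrypted plaintext     */

  Hacl_AEAD_Chacha20Poly1305_encrypt(ct, tag, msg, 8U, aad, 4U, key, nonce);

  /* decrypt returns 0 on success, 1 if the tag does not verify. */
  uint32_t res = Hacl_AEAD_Chacha20Poly1305_decrypt(pt, ct, 8U, aad, 4U, key, nonce, tag);
  return (res == 0U && memcmp(pt, msg, 8U) == 0) ? 0 : 1;
}
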
diff --git a/include/Hacl_AEAD_Chacha20Poly1305_Simd128.h b/include/Hacl_AEAD_Chacha20Poly1305_Simd128.h
new file mode 100644
index 00000000..de26c907
--- /dev/null
+++ b/include/Hacl_AEAD_Chacha20Poly1305_Simd128.h
@@ -0,0 +1,104 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_AEAD_Chacha20Poly1305_Simd128_H
+#define __Hacl_AEAD_Chacha20Poly1305_Simd128_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Chacha20_Vec128.h"
+
+/**
+Encrypt a message `input` with key `key`.
+
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
+
+@param output Pointer to `input_len` bytes of memory where the ciphertext is written to.
+@param tag Pointer to 16 bytes of memory where the mac is written to.
+@param input Pointer to `input_len` bytes of memory where the message is read from.
+@param input_len Length of the message.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
+*/
+void
+Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(
+  uint8_t *output,
+  uint8_t *tag,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce
+);
+
+/**
+Decrypt a ciphertext `input` with key `key`.
+
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
+
+If decryption succeeds, the resulting plaintext is stored in `output` and the function returns the success code 0.
+If decryption fails, the array `output` remains unchanged and the function returns the error code 1.
+
+@param output Pointer to `input_len` bytes of memory where the message is written to.
+@param input Pointer to `input_len` bytes of memory where the ciphertext is read from.
+@param input_len Length of the ciphertext.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
+@param tag Pointer to 16 bytes of memory where the mac is read from.
+
+@returns 0 on success; 1 on failure.
+*/
+uint32_t
+Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce,
+  uint8_t *tag
+);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_AEAD_Chacha20Poly1305_Simd128_H_DEFINED
+#endif
diff --git a/include/Hacl_AEAD_Chacha20Poly1305_Simd256.h b/include/Hacl_AEAD_Chacha20Poly1305_Simd256.h
new file mode 100644
index 00000000..0abcdc59
--- /dev/null
+++ b/include/Hacl_AEAD_Chacha20Poly1305_Simd256.h
@@ -0,0 +1,104 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_AEAD_Chacha20Poly1305_Simd256_H
+#define __Hacl_AEAD_Chacha20Poly1305_Simd256_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Chacha20_Vec256.h"
+
+/**
+Encrypt a message `input` with key `key`.
+
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
+
+@param output Pointer to `input_len` bytes of memory where the ciphertext is written to.
+@param tag Pointer to 16 bytes of memory where the mac is written to.
+@param input Pointer to `input_len` bytes of memory where the message is read from.
+@param input_len Length of the message.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
+*/
+void
+Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(
+  uint8_t *output,
+  uint8_t *tag,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce
+);
+
+/**
+Decrypt a ciphertext `input` with key `key`.
+
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
+
+If decryption succeeds, the resulting plaintext is stored in `output` and the function returns the success code 0.
+If decryption fails, the array `output` remains unchanged and the function returns the error code 1.
+
+@param output Pointer to `input_len` bytes of memory where the message is written to.
+@param input Pointer to `input_len` bytes of memory where the ciphertext is read from.
+@param input_len Length of the ciphertext.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
+@param tag Pointer to 16 bytes of memory where the mac is read from.
+
+@returns 0 on success; 1 on failure.
+*/
+uint32_t
+Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce,
+  uint8_t *tag
+);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_AEAD_Chacha20Poly1305_Simd256_H_DEFINED
+#endif
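
The two SIMD headers above expose exactly the same signatures as the portable header, only under the `_Simd128_` and `_Simd256_` prefixes, so a caller can dispatch at run time. In the sketch below, `cpu_has_avx2` is a placeholder for whatever feature detection the caller already has (the EverCrypt layer performs this selection automatically); it is an assumption, not something provided by this patch.

#include "Hacl_AEAD_Chacha20Poly1305.h"
#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"

/* Placeholder: the vectorized variant must only be called on hardware with
   the corresponding vector extensions. */
extern int cpu_has_avx2(void);

static void
aead_encrypt_dispatch(
  uint8_t *ct,
  uint8_t *tag,
  uint8_t *msg,
  uint32_t msg_len,
  uint8_t *aad,
  uint32_t aad_len,
  uint8_t *key,
  uint8_t *nonce
)
{
  if (cpu_has_avx2())
    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(ct, tag, msg, msg_len, aad, aad_len, key, nonce);
  else
    Hacl_AEAD_Chacha20Poly1305_encrypt(ct, tag, msg, msg_len, aad, aad_len, key, nonce);
}
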
diff --git a/include/Hacl_Chacha20Poly1305_128.h b/include/Hacl_Chacha20Poly1305_128.h
deleted file mode 100644
index 630fab93..00000000
--- a/include/Hacl_Chacha20Poly1305_128.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Chacha20Poly1305_128_H
-#define __Hacl_Chacha20Poly1305_128_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-#include "Hacl_Poly1305_128.h"
-#include "Hacl_Chacha20_Vec128.h"
-
-/**
-Encrypt a message `m` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
-
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the message.
-@param m Pointer to `mlen` bytes of memory where the message is read from.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is written to.
-@param mac Pointer to 16 bytes of memory where the mac is written to.
-*/
-void
-Hacl_Chacha20Poly1305_128_aead_encrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
-);
-
-/**
-Decrypt a ciphertext `cipher` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
-
-If decryption succeeds, the resulting plaintext is stored in `m` and the function returns the success code 0.
-If decryption fails, the array `m` remains unchanged and the function returns the error code 1.
-
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the ciphertext.
-@param m Pointer to `mlen` bytes of memory where the message is written to.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is read from.
-@param mac Pointer to 16 bytes of memory where the mac is read from.
-
-@returns 0 on succeess; 1 on failure.
-*/
-uint32_t
-Hacl_Chacha20Poly1305_128_aead_decrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Chacha20Poly1305_128_H_DEFINED
-#endif
diff --git a/include/Hacl_Chacha20Poly1305_256.h b/include/Hacl_Chacha20Poly1305_256.h
deleted file mode 100644
index ff0f2e60..00000000
--- a/include/Hacl_Chacha20Poly1305_256.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Chacha20Poly1305_256_H
-#define __Hacl_Chacha20Poly1305_256_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-#include "Hacl_Poly1305_256.h"
-#include "Hacl_Chacha20_Vec256.h"
-
-/**
-Encrypt a message `m` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
-
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the message.
-@param m Pointer to `mlen` bytes of memory where the message is read from.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is written to.
-@param mac Pointer to 16 bytes of memory where the mac is written to.
-*/
-void
-Hacl_Chacha20Poly1305_256_aead_encrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
-);
-
-/**
-Decrypt a ciphertext `cipher` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
-
-If decryption succeeds, the resulting plaintext is stored in `m` and the function returns the success code 0.
-If decryption fails, the array `m` remains unchanged and the function returns the error code 1.
-
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the ciphertext.
-@param m Pointer to `mlen` bytes of memory where the message is written to.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is read from.
-@param mac Pointer to 16 bytes of memory where the mac is read from.
-
-@returns 0 on succeess; 1 on failure.
-*/
-uint32_t
-Hacl_Chacha20Poly1305_256_aead_decrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Chacha20Poly1305_256_H_DEFINED
-#endif
diff --git a/include/Hacl_HMAC.h b/include/Hacl_HMAC.h
index 84dbedf5..e1dc04f2 100644
--- a/include/Hacl_HMAC.h
+++ b/include/Hacl_HMAC.h
@@ -37,7 +37,8 @@ extern "C" {
 
 #include "Hacl_Krmllib.h"
 #include "Hacl_Hash_SHA2.h"
-#include "Hacl_Hash_Blake2.h"
+#include "Hacl_Hash_Blake2s.h"
+#include "Hacl_Hash_Blake2b.h"
 
 /**
 Write the HMAC-SHA-1 MAC of a message (`data`) by using a key (`key`) into `dst`.
@@ -46,7 +47,7 @@ The key can be any length and will be hashed if it is longer and padded if it is
 `dst` must point to 20 bytes of memory.
 */
 void
-Hacl_HMAC_legacy_compute_sha1(
+Hacl_HMAC_compute_sha1(
   uint8_t *dst,
   uint8_t *key,
   uint32_t key_len,
diff --git a/include/Hacl_HMAC_Blake2b_256.h b/include/Hacl_HMAC_Blake2b_256.h
index e94ba05f..d8f3e9e1 100644
--- a/include/Hacl_HMAC_Blake2b_256.h
+++ b/include/Hacl_HMAC_Blake2b_256.h
@@ -36,7 +36,7 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Krmllib.h"
-#include "Hacl_Hash_Blake2b_256.h"
+#include "Hacl_Hash_Blake2b_Simd256.h"
 
 /**
 Write the HMAC-BLAKE2b MAC of a message (`data`) by using a key (`key`) into `dst`.
diff --git a/include/Hacl_HMAC_Blake2s_128.h b/include/Hacl_HMAC_Blake2s_128.h
index 7f20343e..5ff79038 100644
--- a/include/Hacl_HMAC_Blake2s_128.h
+++ b/include/Hacl_HMAC_Blake2s_128.h
@@ -35,7 +35,7 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "Hacl_Hash_Blake2s_128.h"
+#include "Hacl_Hash_Blake2s_Simd128.h"
 
 /**
 Write the HMAC-BLAKE2s MAC of a message (`data`) by using a key (`key`) into `dst`.
diff --git a/include/Hacl_HPKE_Curve51_CP128_SHA256.h b/include/Hacl_HPKE_Curve51_CP128_SHA256.h
index a768df6b..a46db470 100644
--- a/include/Hacl_HPKE_Curve51_CP128_SHA256.h
+++ b/include/Hacl_HPKE_Curve51_CP128_SHA256.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_51.h"
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
 
 uint32_t
 Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS(
diff --git a/include/Hacl_HPKE_Curve51_CP128_SHA512.h b/include/Hacl_HPKE_Curve51_CP128_SHA512.h
index a4388707..89091754 100644
--- a/include/Hacl_HPKE_Curve51_CP128_SHA512.h
+++ b/include/Hacl_HPKE_Curve51_CP128_SHA512.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_51.h"
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
 
 uint32_t
 Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS(
diff --git a/include/Hacl_HPKE_Curve51_CP256_SHA256.h b/include/Hacl_HPKE_Curve51_CP256_SHA256.h
index 37b26f6a..83ba2adb 100644
--- a/include/Hacl_HPKE_Curve51_CP256_SHA256.h
+++ b/include/Hacl_HPKE_Curve51_CP256_SHA256.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_51.h"
-#include "Hacl_Chacha20Poly1305_256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
 
 uint32_t
 Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS(
diff --git a/include/Hacl_HPKE_Curve51_CP256_SHA512.h b/include/Hacl_HPKE_Curve51_CP256_SHA512.h
index f7240a95..1a796ab7 100644
--- a/include/Hacl_HPKE_Curve51_CP256_SHA512.h
+++ b/include/Hacl_HPKE_Curve51_CP256_SHA512.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_51.h"
-#include "Hacl_Chacha20Poly1305_256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
 
 uint32_t
 Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS(
diff --git a/include/Hacl_HPKE_Curve51_CP32_SHA256.h b/include/Hacl_HPKE_Curve51_CP32_SHA256.h
index e48242e6..d249ba05 100644
--- a/include/Hacl_HPKE_Curve51_CP32_SHA256.h
+++ b/include/Hacl_HPKE_Curve51_CP32_SHA256.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_51.h"
-#include "Hacl_Chacha20Poly1305_32.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 
 uint32_t
 Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS(
diff --git a/include/Hacl_HPKE_Curve51_CP32_SHA512.h b/include/Hacl_HPKE_Curve51_CP32_SHA512.h
index 057f8769..ddc00da3 100644
--- a/include/Hacl_HPKE_Curve51_CP32_SHA512.h
+++ b/include/Hacl_HPKE_Curve51_CP32_SHA512.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_51.h"
-#include "Hacl_Chacha20Poly1305_32.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 
 uint32_t
 Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS(
diff --git a/include/Hacl_HPKE_Curve64_CP128_SHA256.h b/include/Hacl_HPKE_Curve64_CP128_SHA256.h
index 1694a123..fda63e52 100644
--- a/include/Hacl_HPKE_Curve64_CP128_SHA256.h
+++ b/include/Hacl_HPKE_Curve64_CP128_SHA256.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_64.h"
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
 
 uint32_t
 Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS(
diff --git a/include/Hacl_HPKE_Curve64_CP128_SHA512.h b/include/Hacl_HPKE_Curve64_CP128_SHA512.h
index 23f52f25..c8b06ca8 100644
--- a/include/Hacl_HPKE_Curve64_CP128_SHA512.h
+++ b/include/Hacl_HPKE_Curve64_CP128_SHA512.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_64.h"
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
 
 uint32_t
 Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS(
diff --git a/include/Hacl_HPKE_Curve64_CP256_SHA256.h b/include/Hacl_HPKE_Curve64_CP256_SHA256.h
index 33d471bc..2da8dbcf 100644
--- a/include/Hacl_HPKE_Curve64_CP256_SHA256.h
+++ b/include/Hacl_HPKE_Curve64_CP256_SHA256.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_64.h"
-#include "Hacl_Chacha20Poly1305_256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
 
 uint32_t
 Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS(
diff --git a/include/Hacl_HPKE_Curve64_CP256_SHA512.h b/include/Hacl_HPKE_Curve64_CP256_SHA512.h
index d59c1ee4..87d919e1 100644
--- a/include/Hacl_HPKE_Curve64_CP256_SHA512.h
+++ b/include/Hacl_HPKE_Curve64_CP256_SHA512.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_64.h"
-#include "Hacl_Chacha20Poly1305_256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
 
 uint32_t
 Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS(
diff --git a/include/Hacl_HPKE_Curve64_CP32_SHA256.h b/include/Hacl_HPKE_Curve64_CP32_SHA256.h
index 5aaa07e1..bd4b9b59 100644
--- a/include/Hacl_HPKE_Curve64_CP32_SHA256.h
+++ b/include/Hacl_HPKE_Curve64_CP32_SHA256.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_64.h"
-#include "Hacl_Chacha20Poly1305_32.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 
 uint32_t
 Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS(
diff --git a/include/Hacl_HPKE_Curve64_CP32_SHA512.h b/include/Hacl_HPKE_Curve64_CP32_SHA512.h
index 594000f2..0d2bb8f0 100644
--- a/include/Hacl_HPKE_Curve64_CP32_SHA512.h
+++ b/include/Hacl_HPKE_Curve64_CP32_SHA512.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_64.h"
-#include "Hacl_Chacha20Poly1305_32.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 
 uint32_t
 Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS(
diff --git a/include/Hacl_HPKE_P256_CP128_SHA256.h b/include/Hacl_HPKE_P256_CP128_SHA256.h
index 613fef83..c76a100d 100644
--- a/include/Hacl_HPKE_P256_CP128_SHA256.h
+++ b/include/Hacl_HPKE_P256_CP128_SHA256.h
@@ -37,7 +37,7 @@ extern "C" {
 
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
 
 uint32_t
 Hacl_HPKE_P256_CP128_SHA256_setupBaseS(
diff --git a/include/Hacl_HPKE_P256_CP256_SHA256.h b/include/Hacl_HPKE_P256_CP256_SHA256.h
index 6e74b1db..4a33eb8a 100644
--- a/include/Hacl_HPKE_P256_CP256_SHA256.h
+++ b/include/Hacl_HPKE_P256_CP256_SHA256.h
@@ -37,7 +37,7 @@ extern "C" {
 
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
-#include "Hacl_Chacha20Poly1305_256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
 
 uint32_t
 Hacl_HPKE_P256_CP256_SHA256_setupBaseS(
diff --git a/include/Hacl_HPKE_P256_CP32_SHA256.h b/include/Hacl_HPKE_P256_CP32_SHA256.h
index 1f8679d4..2818abed 100644
--- a/include/Hacl_HPKE_P256_CP32_SHA256.h
+++ b/include/Hacl_HPKE_P256_CP32_SHA256.h
@@ -37,7 +37,7 @@ extern "C" {
 
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
-#include "Hacl_Chacha20Poly1305_32.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 
 uint32_t
 Hacl_HPKE_P256_CP32_SHA256_setupBaseS(
diff --git a/include/Hacl_Hash_Blake2.h b/include/Hacl_Hash_Blake2.h
deleted file mode 100644
index 3ee29015..00000000
--- a/include/Hacl_Hash_Blake2.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Hash_Blake2_H
-#define __Hacl_Hash_Blake2_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-#include "Hacl_Krmllib.h"
-
-void Hacl_Blake2b_32_blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn);
-
-void
-Hacl_Blake2b_32_blake2b_update_key(
-  uint64_t *wv,
-  uint64_t *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-);
-
-void
-Hacl_Blake2b_32_blake2b_update_multi(
-  uint32_t len,
-  uint64_t *wv,
-  uint64_t *hash,
-  FStar_UInt128_uint128 prev,
-  uint8_t *blocks,
-  uint32_t nb
-);
-
-void
-Hacl_Blake2b_32_blake2b_update_last(
-  uint32_t len,
-  uint64_t *wv,
-  uint64_t *hash,
-  FStar_UInt128_uint128 prev,
-  uint32_t rem,
-  uint8_t *d
-);
-
-void Hacl_Blake2b_32_blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash);
-
-/**
-Write the BLAKE2b digest of message `d` using key `k` into `output`.
-
-@param nn Length of the to-be-generated digest with 1 <= `nn` <= 64.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2b_32_blake2b(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-);
-
-uint64_t *Hacl_Blake2b_32_blake2b_malloc(void);
-
-void Hacl_Blake2s_32_blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn);
-
-void
-Hacl_Blake2s_32_blake2s_update_key(
-  uint32_t *wv,
-  uint32_t *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-);
-
-void
-Hacl_Blake2s_32_blake2s_update_multi(
-  uint32_t len,
-  uint32_t *wv,
-  uint32_t *hash,
-  uint64_t prev,
-  uint8_t *blocks,
-  uint32_t nb
-);
-
-void
-Hacl_Blake2s_32_blake2s_update_last(
-  uint32_t len,
-  uint32_t *wv,
-  uint32_t *hash,
-  uint64_t prev,
-  uint32_t rem,
-  uint8_t *d
-);
-
-void Hacl_Blake2s_32_blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash);
-
-/**
-Write the BLAKE2s digest of message `d` using key `k` into `output`.
-
-@param nn Length of to-be-generated digest with 1 <= `nn` <= 32.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2s_32_blake2s(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-);
-
-uint32_t *Hacl_Blake2s_32_blake2s_malloc(void);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Hash_Blake2_H_DEFINED
-#endif
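
The deleted header above exposed the one-shot BLAKE2 functions with the digest length as the first argument; for comparison with the `hash_with_key` functions introduced later in this patch, a minimal call against that removed convention looked like this (unkeyed use is assumed to be expressed as `kk = 0` with a dummy key buffer):

```c
#include <stdint.h>
#include <stdio.h>
#include "Hacl_Hash_Blake2.h"   /* removed by this patch */

int main(void)
{
  uint8_t msg[3] = "abc";
  uint8_t out[64];              /* full 64-byte BLAKE2b digest */
  uint8_t dummy_key[1] = { 0 }; /* kk = 0 means unkeyed hashing */

  /* Old argument order: nn, output, ll, d, kk, k. */
  Hacl_Blake2b_32_blake2b(sizeof out, out, sizeof msg, msg, 0, dummy_key);

  for (unsigned i = 0; i < sizeof out; i++)
    printf("%02x", out[i]);
  printf("\n");
  return 0;
}
```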
diff --git a/include/Hacl_Streaming_Blake2b_256.h b/include/Hacl_Hash_Blake2b.h
similarity index 56%
rename from include/Hacl_Streaming_Blake2b_256.h
rename to include/Hacl_Hash_Blake2b.h
index 20e42d7c..414574f9 100644
--- a/include/Hacl_Streaming_Blake2b_256.h
+++ b/include/Hacl_Hash_Blake2b.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Streaming_Blake2b_256_H
-#define __Hacl_Streaming_Blake2b_256_H
+#ifndef __Hacl_Hash_Blake2b_H
+#define __Hacl_Hash_Blake2b_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -37,67 +37,71 @@ extern "C" {
 
 #include "Hacl_Streaming_Types.h"
 #include "Hacl_Krmllib.h"
-#include "Hacl_Hash_Blake2b_256.h"
 
-typedef struct Hacl_Streaming_Blake2b_256_blake2b_256_block_state_s
+typedef struct Hacl_Hash_Blake2b_block_state_t_s
 {
-  Lib_IntVector_Intrinsics_vec256 *fst;
-  Lib_IntVector_Intrinsics_vec256 *snd;
+  uint64_t *fst;
+  uint64_t *snd;
 }
-Hacl_Streaming_Blake2b_256_blake2b_256_block_state;
+Hacl_Hash_Blake2b_block_state_t;
 
-typedef struct Hacl_Streaming_Blake2b_256_blake2b_256_state_s
+typedef struct Hacl_Hash_Blake2b_state_t_s
 {
-  Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state;
+  Hacl_Hash_Blake2b_block_state_t block_state;
   uint8_t *buf;
   uint64_t total_len;
 }
-Hacl_Streaming_Blake2b_256_blake2b_256_state;
+Hacl_Hash_Blake2b_state_t;
 
 /**
   State allocation function when there is no key
 */
-Hacl_Streaming_Blake2b_256_blake2b_256_state
-*Hacl_Streaming_Blake2b_256_blake2b_256_no_key_create_in(void);
+Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void);
 
 /**
-  (Re-)initialization function when there is no key
+  Re-initialization function when there is no key
 */
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_init(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *s
-);
+void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *state);
 
 /**
   Update function when there is no key; 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *p,
-  uint8_t *data,
-  uint32_t len
-);
+Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint32_t chunk_len);
 
 /**
   Finish function when there is no key
 */
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *p,
-  uint8_t *dst
-);
+void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output);
 
 /**
   Free state function when there is no key
 */
+void Hacl_Hash_Blake2b_free(Hacl_Hash_Blake2b_state_t *state);
+
+/**
+Write the BLAKE2b digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 64.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
 void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_free(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *s
+Hacl_Hash_Blake2b_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
 );
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Streaming_Blake2b_256_H_DEFINED
+#define __Hacl_Hash_Blake2b_H_DEFINED
 #endif
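
The renamed streaming API and the new `hash_with_key` one-shot are both fully declared above; a small sketch of how they are expected to be used together (it assumes the streaming `digest` writes the default 64-byte BLAKE2b output, and that a zero-length key with a dummy buffer means unkeyed hashing):

```c
#include <stdint.h>
#include <stdio.h>
#include "Hacl_Hash_Blake2b.h"

int main(void)
{
  uint8_t msg[3] = "abc";
  uint8_t dummy_key[1] = { 0 };
  uint8_t digest_streaming[64], digest_oneshot[64];

  /* Streaming: allocate, feed chunks, extract the digest, free. */
  Hacl_Hash_Blake2b_state_t *st = Hacl_Hash_Blake2b_malloc();
  Hacl_Hash_Blake2b_update(st, msg, sizeof msg);   /* returns 0 on success */
  Hacl_Hash_Blake2b_digest(st, digest_streaming);
  Hacl_Hash_Blake2b_free(st);

  /* One-shot, unkeyed (key_len = 0), full 64-byte output. */
  Hacl_Hash_Blake2b_hash_with_key(digest_oneshot, 64, msg, sizeof msg, dummy_key, 0);

  for (unsigned i = 0; i < 64; i++)
    printf("%02x", digest_oneshot[i]);
  printf("\n");
  return 0;
}
```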
diff --git a/include/Hacl_Hash_Blake2b_Simd256.h b/include/Hacl_Hash_Blake2b_Simd256.h
new file mode 100644
index 00000000..adddce66
--- /dev/null
+++ b/include/Hacl_Hash_Blake2b_Simd256.h
@@ -0,0 +1,113 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_Hash_Blake2b_Simd256_H
+#define __Hacl_Hash_Blake2b_Simd256_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Streaming_Types.h"
+#include "Hacl_Krmllib.h"
+#include "libintvector.h"
+
+typedef struct Hacl_Hash_Blake2b_Simd256_block_state_t_s
+{
+  Lib_IntVector_Intrinsics_vec256 *fst;
+  Lib_IntVector_Intrinsics_vec256 *snd;
+}
+Hacl_Hash_Blake2b_Simd256_block_state_t;
+
+typedef struct Hacl_Hash_Blake2b_Simd256_state_t_s
+{
+  Hacl_Hash_Blake2b_Simd256_block_state_t block_state;
+  uint8_t *buf;
+  uint64_t total_len;
+}
+Hacl_Hash_Blake2b_Simd256_state_t;
+
+/**
+  State allocation function when there is no key
+*/
+Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void);
+
+/**
+  Re-initialization function when there is no key
+*/
+void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *state);
+
+/**
+  Update function when there is no key; 0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Hash_Blake2b_Simd256_update(
+  Hacl_Hash_Blake2b_Simd256_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
+);
+
+/**
+  Finish function when there is no key
+*/
+void
+Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8_t *output);
+
+/**
+  Free state function when there is no key
+*/
+void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state);
+
+/**
+Write the BLAKE2b digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 64.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
+void
+Hacl_Hash_Blake2b_Simd256_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
+);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_Hash_Blake2b_Simd256_H_DEFINED
+#endif
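
The new SIMD256 header mirrors the portable BLAKE2b API above; a keyed, truncated-output call might look as follows (a sketch assuming the 256-bit vectorized build is available on the target, e.g. AVX2):

```c
#include <stdint.h>
#include <stdio.h>
#include "Hacl_Hash_Blake2b_Simd256.h"

int main(void)
{
  uint8_t key[32] = { 0 };   /* BLAKE2b accepts keys of up to 64 bytes */
  uint8_t msg[5]  = "hello";
  uint8_t out[32];           /* any output length in 1..64 is allowed */

  /* Keyed BLAKE2b with a truncated 32-byte digest, 256-bit vectorized code. */
  Hacl_Hash_Blake2b_Simd256_hash_with_key(out, sizeof out, msg, sizeof msg, key, sizeof key);

  for (unsigned i = 0; i < sizeof out; i++)
    printf("%02x", out[i]);
  printf("\n");
  return 0;
}
```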
diff --git a/include/Hacl_Streaming_Blake2s_128.h b/include/Hacl_Hash_Blake2s.h
similarity index 56%
rename from include/Hacl_Streaming_Blake2s_128.h
rename to include/Hacl_Hash_Blake2s.h
index 60e209ff..2c0d7c5b 100644
--- a/include/Hacl_Streaming_Blake2s_128.h
+++ b/include/Hacl_Hash_Blake2s.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Streaming_Blake2s_128_H
-#define __Hacl_Streaming_Blake2s_128_H
+#ifndef __Hacl_Hash_Blake2s_H
+#define __Hacl_Hash_Blake2s_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -36,67 +36,71 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Streaming_Types.h"
-#include "Hacl_Hash_Blake2s_128.h"
 
-typedef struct Hacl_Streaming_Blake2s_128_blake2s_128_block_state_s
+typedef struct Hacl_Hash_Blake2s_block_state_t_s
 {
-  Lib_IntVector_Intrinsics_vec128 *fst;
-  Lib_IntVector_Intrinsics_vec128 *snd;
+  uint32_t *fst;
+  uint32_t *snd;
 }
-Hacl_Streaming_Blake2s_128_blake2s_128_block_state;
+Hacl_Hash_Blake2s_block_state_t;
 
-typedef struct Hacl_Streaming_Blake2s_128_blake2s_128_state_s
+typedef struct Hacl_Hash_Blake2s_state_t_s
 {
-  Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state;
+  Hacl_Hash_Blake2s_block_state_t block_state;
   uint8_t *buf;
   uint64_t total_len;
 }
-Hacl_Streaming_Blake2s_128_blake2s_128_state;
+Hacl_Hash_Blake2s_state_t;
 
 /**
   State allocation function when there is no key
 */
-Hacl_Streaming_Blake2s_128_blake2s_128_state
-*Hacl_Streaming_Blake2s_128_blake2s_128_no_key_create_in(void);
+Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void);
 
 /**
-  (Re-)initialization function when there is no key
+  Re-initialization function when there is no key
 */
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_init(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *s
-);
+void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *state);
 
 /**
   Update function when there is no key; 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *p,
-  uint8_t *data,
-  uint32_t len
-);
+Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint32_t chunk_len);
 
 /**
   Finish function when there is no key
 */
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_finish(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *p,
-  uint8_t *dst
-);
+void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output);
 
 /**
   Free state function when there is no key
 */
+void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state);
+
+/**
+Write the BLAKE2s digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
 void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_free(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *s
+Hacl_Hash_Blake2s_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
 );
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Streaming_Blake2s_128_H_DEFINED
+#define __Hacl_Hash_Blake2s_H_DEFINED
 #endif
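
As with BLAKE2b, the renamed BLAKE2s header pairs a streaming state with a one-shot `hash_with_key`; a keyed one-shot call, used MAC-style, might look like this (key and message are placeholders):

```c
#include <stdint.h>
#include <stdio.h>
#include "Hacl_Hash_Blake2s.h"

int main(void)
{
  uint8_t key[16] = { 0x42 }; /* BLAKE2s keys may be up to 32 bytes; key_len = 0 means unkeyed */
  uint8_t msg[12] = "some message";
  uint8_t out[32];            /* full 32-byte BLAKE2s digest */

  Hacl_Hash_Blake2s_hash_with_key(out, sizeof out, msg, sizeof msg, key, sizeof key);

  for (unsigned i = 0; i < sizeof out; i++)
    printf("%02x", out[i]);
  printf("\n");
  return 0;
}
```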
diff --git a/include/Hacl_Hash_Blake2s_Simd128.h b/include/Hacl_Hash_Blake2s_Simd128.h
new file mode 100644
index 00000000..6484005e
--- /dev/null
+++ b/include/Hacl_Hash_Blake2s_Simd128.h
@@ -0,0 +1,112 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_Hash_Blake2s_Simd128_H
+#define __Hacl_Hash_Blake2s_Simd128_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Streaming_Types.h"
+#include "libintvector.h"
+
+typedef struct Hacl_Hash_Blake2s_Simd128_block_state_t_s
+{
+  Lib_IntVector_Intrinsics_vec128 *fst;
+  Lib_IntVector_Intrinsics_vec128 *snd;
+}
+Hacl_Hash_Blake2s_Simd128_block_state_t;
+
+typedef struct Hacl_Hash_Blake2s_Simd128_state_t_s
+{
+  Hacl_Hash_Blake2s_Simd128_block_state_t block_state;
+  uint8_t *buf;
+  uint64_t total_len;
+}
+Hacl_Hash_Blake2s_Simd128_state_t;
+
+/**
+  State allocation function when there is no key
+*/
+Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void);
+
+/**
+  Re-initialization function when there is no key
+*/
+void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *state);
+
+/**
+  Update function when there is no key; 0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Hash_Blake2s_Simd128_update(
+  Hacl_Hash_Blake2s_Simd128_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
+);
+
+/**
+  Finish function when there is no key
+*/
+void
+Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8_t *output);
+
+/**
+  Free state function when there is no key
+*/
+void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state);
+
+/**
+Write the BLAKE2s digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
+void
+Hacl_Hash_Blake2s_Simd128_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
+);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_Hash_Blake2s_Simd128_H_DEFINED
+#endif
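
The SIMD128 variant follows the same streaming shape as the scalar BLAKE2s API; a sketch of feeding two chunks and extracting the 32-byte digest (assuming a target with 128-bit vector support):

```c
#include <stdint.h>
#include <stdio.h>
#include "Hacl_Hash_Blake2s_Simd128.h"

int main(void)
{
  uint8_t chunk1[5] = "hello";
  uint8_t chunk2[6] = " world";
  uint8_t out[32];

  Hacl_Hash_Blake2s_Simd128_state_t *st = Hacl_Hash_Blake2s_Simd128_malloc();
  Hacl_Hash_Blake2s_Simd128_update(st, chunk1, sizeof chunk1);  /* 0 = success */
  Hacl_Hash_Blake2s_Simd128_update(st, chunk2, sizeof chunk2);
  Hacl_Hash_Blake2s_Simd128_digest(st, out);
  Hacl_Hash_Blake2s_Simd128_free(st);

  for (unsigned i = 0; i < sizeof out; i++)
    printf("%02x", out[i]);
  printf("\n");
  return 0;
}
```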
diff --git a/include/Hacl_Hash_MD5.h b/include/Hacl_Hash_MD5.h
index dd4c75e0..db93d7d6 100644
--- a/include/Hacl_Hash_MD5.h
+++ b/include/Hacl_Hash_MD5.h
@@ -37,25 +37,25 @@ extern "C" {
 
 #include "Hacl_Streaming_Types.h"
 
-typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_MD5_state;
+typedef Hacl_Streaming_MD_state_32 Hacl_Hash_MD5_state_t;
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_create_in(void);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_malloc(void);
 
-void Hacl_Streaming_MD5_legacy_init(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_MD5_reset(Hacl_Streaming_MD_state_32 *state);
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len);
+Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len);
 
-void Hacl_Streaming_MD5_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+void Hacl_Hash_MD5_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output);
 
-void Hacl_Streaming_MD5_legacy_free(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_MD5_free(Hacl_Streaming_MD_state_32 *state);
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_copy(Hacl_Streaming_MD_state_32 *s0);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_copy(Hacl_Streaming_MD_state_32 *state);
 
-void Hacl_Streaming_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_MD5_hash(uint8_t *output, uint8_t *input, uint32_t input_len);
 
 #if defined(__cplusplus)
 }
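
The MD5 renames also change the one-shot argument order (the output buffer now comes first); a sketch of both the one-shot and the streaming path under the new names:

```c
#include <stdint.h>
#include <stdio.h>
#include "Hacl_Hash_MD5.h"

int main(void)
{
  uint8_t msg[3] = "abc";
  uint8_t out[16];   /* MD5 digest is 16 bytes */

  /* One-shot: renamed from Hacl_Streaming_MD5_legacy_hash; output first. */
  Hacl_Hash_MD5_hash(out, msg, sizeof msg);

  /* Streaming equivalent of the call above. */
  Hacl_Hash_MD5_state_t *st = Hacl_Hash_MD5_malloc();
  Hacl_Hash_MD5_update(st, msg, sizeof msg);   /* 0 = success */
  Hacl_Hash_MD5_digest(st, out);
  Hacl_Hash_MD5_free(st);

  for (unsigned i = 0; i < sizeof out; i++)
    printf("%02x", out[i]);
  printf("\n");
  return 0;
}
```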
diff --git a/include/Hacl_Hash_SHA1.h b/include/Hacl_Hash_SHA1.h
index 2737b20f..19045440 100644
--- a/include/Hacl_Hash_SHA1.h
+++ b/include/Hacl_Hash_SHA1.h
@@ -37,25 +37,25 @@ extern "C" {
 
 #include "Hacl_Streaming_Types.h"
 
-typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_SHA1_state;
+typedef Hacl_Streaming_MD_state_32 Hacl_Hash_SHA1_state_t;
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_create_in(void);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_malloc(void);
 
-void Hacl_Streaming_SHA1_legacy_init(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA1_reset(Hacl_Streaming_MD_state_32 *state);
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len);
+Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len);
 
-void Hacl_Streaming_SHA1_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+void Hacl_Hash_SHA1_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output);
 
-void Hacl_Streaming_SHA1_legacy_free(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA1_free(Hacl_Streaming_MD_state_32 *state);
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_state_32 *s0);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_copy(Hacl_Streaming_MD_state_32 *state);
 
-void Hacl_Streaming_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA1_hash(uint8_t *output, uint8_t *input, uint32_t input_len);
 
 #if defined(__cplusplus)
 }
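
The SHA-1 renames mirror the MD5 ones directly above; the renamed one-shot entry point is used the same way (output buffer first):

```c
#include <stdint.h>
#include <stdio.h>
#include "Hacl_Hash_SHA1.h"

int main(void)
{
  uint8_t msg[3] = "abc";
  uint8_t out[20];   /* SHA-1 digest is 20 bytes */

  Hacl_Hash_SHA1_hash(out, msg, sizeof msg);

  for (unsigned i = 0; i < sizeof out; i++)
    printf("%02x", out[i]);
  printf("\n");
  return 0;
}
```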
diff --git a/include/Hacl_Hash_SHA2.h b/include/Hacl_Hash_SHA2.h
index 8f98d878..1c2fab71 100644
--- a/include/Hacl_Hash_SHA2.h
+++ b/include/Hacl_Hash_SHA2.h
@@ -38,19 +38,19 @@ extern "C" {
 #include "Hacl_Streaming_Types.h"
 #include "Hacl_Krmllib.h"
 
-typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_SHA2_state_sha2_224;
+typedef Hacl_Streaming_MD_state_32 Hacl_Hash_SHA2_state_sha2_224;
 
-typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_SHA2_state_sha2_256;
+typedef Hacl_Streaming_MD_state_32 Hacl_Hash_SHA2_state_sha2_256;
 
-typedef Hacl_Streaming_MD_state_64 Hacl_Streaming_SHA2_state_sha2_384;
+typedef Hacl_Streaming_MD_state_64 Hacl_Hash_SHA2_state_sha2_384;
 
-typedef Hacl_Streaming_MD_state_64 Hacl_Streaming_SHA2_state_sha2_512;
+typedef Hacl_Streaming_MD_state_64 Hacl_Hash_SHA2_state_sha2_512;
 
 /**
 Allocate initial state for the SHA2_256 hash. The state is to be freed by
 calling `free_256`.
 */
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_256(void);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_256(void);
 
 /**
 Copies the state passed as argument into a newly allocated state (deep copy).
@@ -58,73 +58,73 @@ The state is to be freed by calling `free_256`. Cloning the state this way is
 useful, for instance, if your control-flow diverges and you need to feed
 more (different) data into the hash in each branch.
 */
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_MD_state_32 *s0);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_copy_256(Hacl_Streaming_MD_state_32 *state);
 
 /**
 Reset an existing state to the initial hash state with empty data.
 */
-void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA2_reset_256(Hacl_Streaming_MD_state_32 *state);
 
 /**
 Feed an arbitrary amount of data into the hash. This function returns 0 for
 success, or 1 if the combined length of all of the data passed to `update_256`
-(since the last call to `init_256`) exceeds 2^61-1 bytes.
+(since the last call to `reset_256`) exceeds 2^61-1 bytes.
 
 This function is identical to the update function for SHA2_224.
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_256(
-  Hacl_Streaming_MD_state_32 *p,
+Hacl_Hash_SHA2_update_256(
+  Hacl_Streaming_MD_state_32 *state,
   uint8_t *input,
   uint32_t input_len
 );
 
 /**
-Write the resulting hash into `dst`, an array of 32 bytes. The state remains
-valid after a call to `finish_256`, meaning the user may feed more data into
-the hash via `update_256`. (The finish_256 function operates on an internal copy of
+Write the resulting hash into `output`, an array of 32 bytes. The state remains
+valid after a call to `digest_256`, meaning the user may feed more data into
+the hash via `update_256`. (The digest_256 function operates on an internal copy of
 the state and therefore does not invalidate the client-held state `p`.)
 */
-void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+void Hacl_Hash_SHA2_digest_256(Hacl_Streaming_MD_state_32 *state, uint8_t *output);
 
 /**
-Free a state allocated with `create_in_256`.
+Free a state allocated with `malloc_256`.
 
 This function is identical to the free function for SHA2_224.
 */
-void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA2_free_256(Hacl_Streaming_MD_state_32 *state);
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 32 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 32 bytes.
 */
-void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA2_hash_256(uint8_t *output, uint8_t *input, uint32_t input_len);
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_224(void);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_224(void);
 
-void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA2_reset_224(Hacl_Streaming_MD_state_32 *state);
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_224(
-  Hacl_Streaming_MD_state_32 *p,
+Hacl_Hash_SHA2_update_224(
+  Hacl_Streaming_MD_state_32 *state,
   uint8_t *input,
   uint32_t input_len
 );
 
 /**
-Write the resulting hash into `dst`, an array of 28 bytes. The state remains
-valid after a call to `finish_224`, meaning the user may feed more data into
+Write the resulting hash into `output`, an array of 28 bytes. The state remains
+valid after a call to `digest_224`, meaning the user may feed more data into
 the hash via `update_224`.
 */
-void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+void Hacl_Hash_SHA2_digest_224(Hacl_Streaming_MD_state_32 *state, uint8_t *output);
 
-void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_MD_state_32 *p);
+void Hacl_Hash_SHA2_free_224(Hacl_Streaming_MD_state_32 *state);
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 28 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 28 bytes.
 */
-void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA2_hash_224(uint8_t *output, uint8_t *input, uint32_t input_len);
 
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_512(void);
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_512(void);
 
 /**
 Copies the state passed as argument into a newly allocated state (deep copy).
@@ -132,68 +132,68 @@ The state is to be freed by calling `free_512`. Cloning the state this way is
 useful, for instance, if your control-flow diverges and you need to feed
 more (different) data into the hash in each branch.
 */
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_copy_512(Hacl_Streaming_MD_state_64 *s0);
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_copy_512(Hacl_Streaming_MD_state_64 *state);
 
-void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_MD_state_64 *s);
+void Hacl_Hash_SHA2_reset_512(Hacl_Streaming_MD_state_64 *state);
 
 /**
 Feed an arbitrary amount of data into the hash. This function returns 0 for
 success, or 1 if the combined length of all of the data passed to `update_512`
-(since the last call to `init_512`) exceeds 2^125-1 bytes.
+(since the last call to `reset_512`) exceeds 2^125-1 bytes.
 
 This function is identical to the update function for SHA2_384.
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_512(
-  Hacl_Streaming_MD_state_64 *p,
+Hacl_Hash_SHA2_update_512(
+  Hacl_Streaming_MD_state_64 *state,
   uint8_t *input,
   uint32_t input_len
 );
 
 /**
-Write the resulting hash into `dst`, an array of 64 bytes. The state remains
-valid after a call to `finish_512`, meaning the user may feed more data into
-the hash via `update_512`. (The finish_512 function operates on an internal copy of
+Write the resulting hash into `output`, an array of 64 bytes. The state remains
+valid after a call to `digest_512`, meaning the user may feed more data into
+the hash via `update_512`. (The digest_512 function operates on an internal copy of
 the state and therefore does not invalidate the client-held state `p`.)
 */
-void Hacl_Streaming_SHA2_finish_512(Hacl_Streaming_MD_state_64 *p, uint8_t *dst);
+void Hacl_Hash_SHA2_digest_512(Hacl_Streaming_MD_state_64 *state, uint8_t *output);
 
 /**
-Free a state allocated with `create_in_512`.
+Free a state allocated with `malloc_512`.
 
 This function is identical to the free function for SHA2_384.
 */
-void Hacl_Streaming_SHA2_free_512(Hacl_Streaming_MD_state_64 *s);
+void Hacl_Hash_SHA2_free_512(Hacl_Streaming_MD_state_64 *state);
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 64 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 64 bytes.
 */
-void Hacl_Streaming_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA2_hash_512(uint8_t *output, uint8_t *input, uint32_t input_len);
 
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_384(void);
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_384(void);
 
-void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_MD_state_64 *s);
+void Hacl_Hash_SHA2_reset_384(Hacl_Streaming_MD_state_64 *state);
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_384(
-  Hacl_Streaming_MD_state_64 *p,
+Hacl_Hash_SHA2_update_384(
+  Hacl_Streaming_MD_state_64 *state,
   uint8_t *input,
   uint32_t input_len
 );
 
 /**
-Write the resulting hash into `dst`, an array of 48 bytes. The state remains
-valid after a call to `finish_384`, meaning the user may feed more data into
+Write the resulting hash into `output`, an array of 48 bytes. The state remains
+valid after a call to `digest_384`, meaning the user may feed more data into
 the hash via `update_384`.
 */
-void Hacl_Streaming_SHA2_finish_384(Hacl_Streaming_MD_state_64 *p, uint8_t *dst);
+void Hacl_Hash_SHA2_digest_384(Hacl_Streaming_MD_state_64 *state, uint8_t *output);
 
-void Hacl_Streaming_SHA2_free_384(Hacl_Streaming_MD_state_64 *p);
+void Hacl_Hash_SHA2_free_384(Hacl_Streaming_MD_state_64 *state);
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 48 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 48 bytes.
 */
-void Hacl_Streaming_SHA2_hash_384(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA2_hash_384(uint8_t *output, uint8_t *input, uint32_t input_len);
 
 #if defined(__cplusplus)
 }
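
The SHA-2 documentation above calls out `copy_256` for diverging control flow; a sketch of hashing a shared prefix once and then branching with a deep copy of the state (the renamed `digest_256` leaves both states reusable):

```c
#include <stdint.h>
#include <stdio.h>
#include "Hacl_Hash_SHA2.h"

int main(void)
{
  uint8_t prefix[6] = "common";
  uint8_t a[2] = "-a", b[2] = "-b";
  uint8_t digest_a[32], digest_b[32];   /* SHA2-256 digests are 32 bytes */

  /* Feed the shared prefix once, then clone the state for two branches. */
  Hacl_Streaming_MD_state_32 *st = Hacl_Hash_SHA2_malloc_256();
  Hacl_Hash_SHA2_update_256(st, prefix, sizeof prefix);   /* 0 = success */

  Hacl_Streaming_MD_state_32 *st2 = Hacl_Hash_SHA2_copy_256(st);  /* deep copy */
  Hacl_Hash_SHA2_update_256(st, a, sizeof a);
  Hacl_Hash_SHA2_update_256(st2, b, sizeof b);

  Hacl_Hash_SHA2_digest_256(st, digest_a);
  Hacl_Hash_SHA2_digest_256(st2, digest_b);

  Hacl_Hash_SHA2_free_256(st);
  Hacl_Hash_SHA2_free_256(st2);

  printf("branch a: %02x..., branch b: %02x...\n", digest_a[0], digest_b[0]);
  return 0;
}
```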
diff --git a/include/Hacl_Hash_SHA3.h b/include/Hacl_Hash_SHA3.h
index e2f5ff06..19123304 100644
--- a/include/Hacl_Hash_SHA3.h
+++ b/include/Hacl_Hash_SHA3.h
@@ -37,48 +37,48 @@ extern "C" {
 
 #include "Hacl_Streaming_Types.h"
 
-typedef struct Hacl_Streaming_Keccak_hash_buf_s
+typedef struct Hacl_Hash_SHA3_hash_buf_s
 {
   Spec_Hash_Definitions_hash_alg fst;
   uint64_t *snd;
 }
-Hacl_Streaming_Keccak_hash_buf;
+Hacl_Hash_SHA3_hash_buf;
 
-typedef struct Hacl_Streaming_Keccak_state_s
+typedef struct Hacl_Hash_SHA3_state_t_s
 {
-  Hacl_Streaming_Keccak_hash_buf block_state;
+  Hacl_Hash_SHA3_hash_buf block_state;
   uint8_t *buf;
   uint64_t total_len;
 }
-Hacl_Streaming_Keccak_state;
+Hacl_Hash_SHA3_state_t;
 
-Spec_Hash_Definitions_hash_alg Hacl_Streaming_Keccak_get_alg(Hacl_Streaming_Keccak_state *s);
+Spec_Hash_Definitions_hash_alg Hacl_Hash_SHA3_get_alg(Hacl_Hash_SHA3_state_t *s);
 
-Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_hash_alg a);
+Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_hash_alg a);
 
-void Hacl_Streaming_Keccak_free(Hacl_Streaming_Keccak_state *s);
+void Hacl_Hash_SHA3_free(Hacl_Hash_SHA3_state_t *state);
 
-Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_copy(Hacl_Streaming_Keccak_state *s0);
+Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_copy(Hacl_Hash_SHA3_state_t *state);
 
-void Hacl_Streaming_Keccak_reset(Hacl_Streaming_Keccak_state *s);
+void Hacl_Hash_SHA3_reset(Hacl_Hash_SHA3_state_t *state);
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint32_t len);
+Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t chunk_len);
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_finish(Hacl_Streaming_Keccak_state *s, uint8_t *dst);
+Hacl_Hash_SHA3_digest(Hacl_Hash_SHA3_state_t *state, uint8_t *output);
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_squeeze(Hacl_Streaming_Keccak_state *s, uint8_t *dst, uint32_t l);
+Hacl_Hash_SHA3_squeeze(Hacl_Hash_SHA3_state_t *s, uint8_t *dst, uint32_t l);
 
-uint32_t Hacl_Streaming_Keccak_block_len(Hacl_Streaming_Keccak_state *s);
+uint32_t Hacl_Hash_SHA3_block_len(Hacl_Hash_SHA3_state_t *s);
 
-uint32_t Hacl_Streaming_Keccak_hash_len(Hacl_Streaming_Keccak_state *s);
+uint32_t Hacl_Hash_SHA3_hash_len(Hacl_Hash_SHA3_state_t *s);
 
-bool Hacl_Streaming_Keccak_is_shake(Hacl_Streaming_Keccak_state *s);
+bool Hacl_Hash_SHA3_is_shake(Hacl_Hash_SHA3_state_t *s);
 
 void
-Hacl_SHA3_shake128_hacl(
+Hacl_Hash_SHA3_shake128_hacl(
   uint32_t inputByteLen,
   uint8_t *input,
   uint32_t outputByteLen,
@@ -86,25 +86,25 @@ Hacl_SHA3_shake128_hacl(
 );
 
 void
-Hacl_SHA3_shake256_hacl(
+Hacl_Hash_SHA3_shake256_hacl(
   uint32_t inputByteLen,
   uint8_t *input,
   uint32_t outputByteLen,
   uint8_t *output
 );
 
-void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+void Hacl_Hash_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
 
-void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+void Hacl_Hash_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
 
-void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+void Hacl_Hash_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
 
-void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+void Hacl_Hash_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
 
-void Hacl_Impl_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s);
+void Hacl_Hash_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s);
 
 void
-Hacl_Impl_SHA3_squeeze(
+Hacl_Hash_SHA3_squeeze0(
   uint64_t *s,
   uint32_t rateInBytes,
   uint32_t outputByteLen,
@@ -112,7 +112,7 @@ Hacl_Impl_SHA3_squeeze(
 );
 
 void
-Hacl_Impl_SHA3_keccak(
+Hacl_Hash_SHA3_keccak(
   uint32_t rate,
   uint32_t capacity,
   uint32_t inputByteLen,
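
Beyond the streaming-state renames, the one-shot SHA-3 and SHAKE functions above keep their length-before-buffer argument order; a minimal sketch of both:

```c
#include <stdint.h>
#include <stdio.h>
#include "Hacl_Hash_SHA3.h"

int main(void)
{
  uint8_t msg[3] = "abc";
  uint8_t digest[32];   /* SHA3-256 output is 32 bytes */
  uint8_t xof[64];      /* SHAKE128 output length is chosen by the caller */

  Hacl_Hash_SHA3_sha3_256(sizeof msg, msg, digest);
  Hacl_Hash_SHA3_shake128_hacl(sizeof msg, msg, sizeof xof, xof);

  printf("sha3-256[0] = %02x, shake128[0] = %02x\n", digest[0], xof[0]);
  return 0;
}
```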
diff --git a/include/Hacl_IntTypes_Intrinsics.h b/include/Hacl_IntTypes_Intrinsics.h
index e2a193e9..c816b046 100644
--- a/include/Hacl_IntTypes_Intrinsics.h
+++ b/include/Hacl_IntTypes_Intrinsics.h
@@ -41,7 +41,7 @@ static inline uint32_t
 Hacl_IntTypes_Intrinsics_add_carry_u32(uint32_t cin, uint32_t x, uint32_t y, uint32_t *r)
 {
   uint64_t res = (uint64_t)x + (uint64_t)cin + (uint64_t)y;
-  uint32_t c = (uint32_t)(res >> (uint32_t)32U);
+  uint32_t c = (uint32_t)(res >> 32U);
   r[0U] = (uint32_t)res;
   return c;
 }
@@ -50,7 +50,7 @@ static inline uint32_t
 Hacl_IntTypes_Intrinsics_sub_borrow_u32(uint32_t cin, uint32_t x, uint32_t y, uint32_t *r)
 {
   uint64_t res = (uint64_t)x - (uint64_t)y - (uint64_t)cin;
-  uint32_t c = (uint32_t)(res >> (uint32_t)32U) & (uint32_t)1U;
+  uint32_t c = (uint32_t)(res >> 32U) & 1U;
   r[0U] = (uint32_t)res;
   return c;
 }
@@ -59,8 +59,7 @@ static inline uint64_t
 Hacl_IntTypes_Intrinsics_add_carry_u64(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r)
 {
   uint64_t res = x + cin + y;
-  uint64_t
-  c = (~FStar_UInt64_gte_mask(res, x) | (FStar_UInt64_eq_mask(res, x) & cin)) & (uint64_t)1U;
+  uint64_t c = (~FStar_UInt64_gte_mask(res, x) | (FStar_UInt64_eq_mask(res, x) & cin)) & 1ULL;
   r[0U] = res;
   return c;
 }
@@ -73,7 +72,7 @@ Hacl_IntTypes_Intrinsics_sub_borrow_u64(uint64_t cin, uint64_t x, uint64_t y, ui
   c =
     ((FStar_UInt64_gte_mask(res, x) & ~FStar_UInt64_eq_mask(res, x))
     | (FStar_UInt64_eq_mask(res, x) & cin))
-    & (uint64_t)1U;
+    & 1ULL;
   r[0U] = res;
   return c;
 }
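
The integer-literal cleanups above do not change behaviour; the carry intrinsics are still used to chain limb-wise arithmetic. A small sketch adding two 64-bit values held as pairs of 32-bit limbs with `add_carry_u32`:

```c
#include <stdint.h>
#include <stdio.h>
#include "Hacl_IntTypes_Intrinsics.h"

int main(void)
{
  /* Little-endian limb order: x = 0x1ffffffff, y = 0x2. */
  uint32_t x[2] = { 0xffffffffU, 0x00000001U };
  uint32_t y[2] = { 0x00000002U, 0x00000000U };
  uint32_t r[2];

  /* Each call returns the carry, which is fed into the next limb. */
  uint32_t c = Hacl_IntTypes_Intrinsics_add_carry_u32(0U, x[0], y[0], &r[0]);
  c = Hacl_IntTypes_Intrinsics_add_carry_u32(c, x[1], y[1], &r[1]);

  /* Expected: r = 0x0000000200000001 with no carry out. */
  printf("result = 0x%08x%08x, carry out = %u\n", r[1], r[0], c);
  return 0;
}
```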
diff --git a/include/Hacl_IntTypes_Intrinsics_128.h b/include/Hacl_IntTypes_Intrinsics_128.h
index aa843a6c..d3008969 100644
--- a/include/Hacl_IntTypes_Intrinsics_128.h
+++ b/include/Hacl_IntTypes_Intrinsics_128.h
@@ -45,7 +45,7 @@ Hacl_IntTypes_Intrinsics_128_add_carry_u64(uint64_t cin, uint64_t x, uint64_t y,
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_uint64_to_uint128(x),
         FStar_UInt128_uint64_to_uint128(cin)),
       FStar_UInt128_uint64_to_uint128(y));
-  uint64_t c = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U));
+  uint64_t c = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U));
   r[0U] = FStar_UInt128_uint128_to_uint64(res);
   return c;
 }
@@ -58,10 +58,7 @@ Hacl_IntTypes_Intrinsics_128_sub_borrow_u64(uint64_t cin, uint64_t x, uint64_t y
     FStar_UInt128_sub_mod(FStar_UInt128_sub_mod(FStar_UInt128_uint64_to_uint128(x),
         FStar_UInt128_uint64_to_uint128(y)),
       FStar_UInt128_uint64_to_uint128(cin));
-  uint64_t
-  c =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U))
-    & (uint64_t)1U;
+  uint64_t c = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U)) & 1ULL;
   r[0U] = FStar_UInt128_uint128_to_uint64(res);
   return c;
 }
diff --git a/include/msvc/Hacl_Streaming_Poly1305_32.h b/include/Hacl_MAC_Poly1305.h
similarity index 67%
rename from include/msvc/Hacl_Streaming_Poly1305_32.h
rename to include/Hacl_MAC_Poly1305.h
index 88d1a513..95ac4be2 100644
--- a/include/msvc/Hacl_Streaming_Poly1305_32.h
+++ b/include/Hacl_MAC_Poly1305.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Streaming_Poly1305_32_H
-#define __Hacl_Streaming_Poly1305_32_H
+#ifndef __Hacl_MAC_Poly1305_H
+#define __Hacl_MAC_Poly1305_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -36,43 +36,36 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Streaming_Types.h"
-#include "Hacl_Poly1305_32.h"
+#include "Hacl_Krmllib.h"
 
-typedef struct Hacl_Streaming_Poly1305_32_poly1305_32_state_s
+typedef struct Hacl_MAC_Poly1305_state_t_s
 {
   uint64_t *block_state;
   uint8_t *buf;
   uint64_t total_len;
   uint8_t *p_key;
 }
-Hacl_Streaming_Poly1305_32_poly1305_32_state;
+Hacl_MAC_Poly1305_state_t;
 
-Hacl_Streaming_Poly1305_32_poly1305_32_state *Hacl_Streaming_Poly1305_32_create_in(uint8_t *k);
+Hacl_MAC_Poly1305_state_t *Hacl_MAC_Poly1305_malloc(uint8_t *key);
 
-void
-Hacl_Streaming_Poly1305_32_init(uint8_t *k, Hacl_Streaming_Poly1305_32_poly1305_32_state *s);
+void Hacl_MAC_Poly1305_reset(Hacl_MAC_Poly1305_state_t *state, uint8_t *key);
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Poly1305_32_update(
-  Hacl_Streaming_Poly1305_32_poly1305_32_state *p,
-  uint8_t *data,
-  uint32_t len
-);
+Hacl_MAC_Poly1305_update(Hacl_MAC_Poly1305_state_t *state, uint8_t *chunk, uint32_t chunk_len);
 
-void
-Hacl_Streaming_Poly1305_32_finish(
-  Hacl_Streaming_Poly1305_32_poly1305_32_state *p,
-  uint8_t *dst
-);
+void Hacl_MAC_Poly1305_digest(Hacl_MAC_Poly1305_state_t *state, uint8_t *output);
 
-void Hacl_Streaming_Poly1305_32_free(Hacl_Streaming_Poly1305_32_poly1305_32_state *s);
+void Hacl_MAC_Poly1305_free(Hacl_MAC_Poly1305_state_t *state);
+
+void Hacl_MAC_Poly1305_mac(uint8_t *output, uint8_t *input, uint32_t input_len, uint8_t *key);
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Streaming_Poly1305_32_H_DEFINED
+#define __Hacl_MAC_Poly1305_H_DEFINED
 #endif
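
The renamed Poly1305 streaming header also gains a one-shot `mac`; a sketch computing the same 16-byte tag both ways (the 32-byte key is a placeholder and must be single-use in practice):

```c
#include <stdint.h>
#include <stdio.h>
#include "Hacl_MAC_Poly1305.h"

int main(void)
{
  uint8_t key[32] = { 0 };        /* one-time Poly1305 key (placeholder) */
  uint8_t msg[12] = "some message";
  uint8_t tag_oneshot[16], tag_streaming[16];

  /* One-shot MAC. */
  Hacl_MAC_Poly1305_mac(tag_oneshot, msg, sizeof msg, key);

  /* Streaming MAC over the same data. */
  Hacl_MAC_Poly1305_state_t *st = Hacl_MAC_Poly1305_malloc(key);
  Hacl_MAC_Poly1305_update(st, msg, sizeof msg);   /* 0 = success */
  Hacl_MAC_Poly1305_digest(st, tag_streaming);
  Hacl_MAC_Poly1305_free(st);

  for (unsigned i = 0; i < sizeof tag_streaming; i++)
    printf("%02x", tag_streaming[i]);
  printf("\n");
  return 0;
}
```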
diff --git a/include/Hacl_Streaming_Poly1305_128.h b/include/Hacl_MAC_Poly1305_Simd128.h
similarity index 67%
rename from include/Hacl_Streaming_Poly1305_128.h
rename to include/Hacl_MAC_Poly1305_Simd128.h
index d6299052..9b69ebd4 100644
--- a/include/Hacl_Streaming_Poly1305_128.h
+++ b/include/Hacl_MAC_Poly1305_Simd128.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Streaming_Poly1305_128_H
-#define __Hacl_Streaming_Poly1305_128_H
+#ifndef __Hacl_MAC_Poly1305_Simd128_H
+#define __Hacl_MAC_Poly1305_Simd128_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -36,44 +36,47 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Streaming_Types.h"
-#include "Hacl_Poly1305_128.h"
+#include "libintvector.h"
 
-typedef struct Hacl_Streaming_Poly1305_128_poly1305_128_state_s
+typedef struct Hacl_MAC_Poly1305_Simd128_state_t_s
 {
   Lib_IntVector_Intrinsics_vec128 *block_state;
   uint8_t *buf;
   uint64_t total_len;
   uint8_t *p_key;
 }
-Hacl_Streaming_Poly1305_128_poly1305_128_state;
+Hacl_MAC_Poly1305_Simd128_state_t;
 
-Hacl_Streaming_Poly1305_128_poly1305_128_state
-*Hacl_Streaming_Poly1305_128_create_in(uint8_t *k);
+Hacl_MAC_Poly1305_Simd128_state_t *Hacl_MAC_Poly1305_Simd128_malloc(uint8_t *key);
 
-void
-Hacl_Streaming_Poly1305_128_init(uint8_t *k, Hacl_Streaming_Poly1305_128_poly1305_128_state *s);
+void Hacl_MAC_Poly1305_Simd128_reset(Hacl_MAC_Poly1305_Simd128_state_t *state, uint8_t *key);
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Poly1305_128_update(
-  Hacl_Streaming_Poly1305_128_poly1305_128_state *p,
-  uint8_t *data,
-  uint32_t len
+Hacl_MAC_Poly1305_Simd128_update(
+  Hacl_MAC_Poly1305_Simd128_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
 );
 
 void
-Hacl_Streaming_Poly1305_128_finish(
-  Hacl_Streaming_Poly1305_128_poly1305_128_state *p,
-  uint8_t *dst
-);
+Hacl_MAC_Poly1305_Simd128_digest(Hacl_MAC_Poly1305_Simd128_state_t *state, uint8_t *output);
+
+void Hacl_MAC_Poly1305_Simd128_free(Hacl_MAC_Poly1305_Simd128_state_t *state);
 
-void Hacl_Streaming_Poly1305_128_free(Hacl_Streaming_Poly1305_128_poly1305_128_state *s);
+void
+Hacl_MAC_Poly1305_Simd128_mac(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key
+);
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Streaming_Poly1305_128_H_DEFINED
+#define __Hacl_MAC_Poly1305_Simd128_H_DEFINED
 #endif
diff --git a/include/msvc/Hacl_Streaming_Poly1305_256.h b/include/Hacl_MAC_Poly1305_Simd256.h
similarity index 67%
rename from include/msvc/Hacl_Streaming_Poly1305_256.h
rename to include/Hacl_MAC_Poly1305_Simd256.h
index 689b837b..89f4a104 100644
--- a/include/msvc/Hacl_Streaming_Poly1305_256.h
+++ b/include/Hacl_MAC_Poly1305_Simd256.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Streaming_Poly1305_256_H
-#define __Hacl_Streaming_Poly1305_256_H
+#ifndef __Hacl_MAC_Poly1305_Simd256_H
+#define __Hacl_MAC_Poly1305_Simd256_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -36,44 +36,47 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Streaming_Types.h"
-#include "Hacl_Poly1305_256.h"
+#include "libintvector.h"
 
-typedef struct Hacl_Streaming_Poly1305_256_poly1305_256_state_s
+typedef struct Hacl_MAC_Poly1305_Simd256_state_t_s
 {
   Lib_IntVector_Intrinsics_vec256 *block_state;
   uint8_t *buf;
   uint64_t total_len;
   uint8_t *p_key;
 }
-Hacl_Streaming_Poly1305_256_poly1305_256_state;
+Hacl_MAC_Poly1305_Simd256_state_t;
 
-Hacl_Streaming_Poly1305_256_poly1305_256_state
-*Hacl_Streaming_Poly1305_256_create_in(uint8_t *k);
+Hacl_MAC_Poly1305_Simd256_state_t *Hacl_MAC_Poly1305_Simd256_malloc(uint8_t *key);
 
-void
-Hacl_Streaming_Poly1305_256_init(uint8_t *k, Hacl_Streaming_Poly1305_256_poly1305_256_state *s);
+void Hacl_MAC_Poly1305_Simd256_reset(Hacl_MAC_Poly1305_Simd256_state_t *state, uint8_t *key);
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Poly1305_256_update(
-  Hacl_Streaming_Poly1305_256_poly1305_256_state *p,
-  uint8_t *data,
-  uint32_t len
+Hacl_MAC_Poly1305_Simd256_update(
+  Hacl_MAC_Poly1305_Simd256_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
 );
 
 void
-Hacl_Streaming_Poly1305_256_finish(
-  Hacl_Streaming_Poly1305_256_poly1305_256_state *p,
-  uint8_t *dst
-);
+Hacl_MAC_Poly1305_Simd256_digest(Hacl_MAC_Poly1305_Simd256_state_t *state, uint8_t *output);
+
+void Hacl_MAC_Poly1305_Simd256_free(Hacl_MAC_Poly1305_Simd256_state_t *state);
 
-void Hacl_Streaming_Poly1305_256_free(Hacl_Streaming_Poly1305_256_poly1305_256_state *s);
+void
+Hacl_MAC_Poly1305_Simd256_mac(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key
+);
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Streaming_Poly1305_256_H_DEFINED
+#define __Hacl_MAC_Poly1305_Simd256_H_DEFINED
 #endif
diff --git a/include/Hacl_NaCl.h b/include/Hacl_NaCl.h
index b7e91a4b..a3ca6804 100644
--- a/include/Hacl_NaCl.h
+++ b/include/Hacl_NaCl.h
@@ -36,7 +36,7 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Salsa20.h"
-#include "Hacl_Poly1305_32.h"
+#include "Hacl_MAC_Poly1305.h"
 #include "Hacl_Curve25519_51.h"
 
 /**
diff --git a/include/Hacl_Poly1305_256.h b/include/Hacl_Poly1305_256.h
deleted file mode 100644
index 9d1ae8c3..00000000
--- a/include/Hacl_Poly1305_256.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Poly1305_256_H
-#define __Hacl_Poly1305_256_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-#include "libintvector.h"
-
-typedef Lib_IntVector_Intrinsics_vec256 *Hacl_Poly1305_256_poly1305_ctx;
-
-void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *key);
-
-void Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *text);
-
-void
-Hacl_Poly1305_256_poly1305_update(
-  Lib_IntVector_Intrinsics_vec256 *ctx,
-  uint32_t len,
-  uint8_t *text
-);
-
-void
-Hacl_Poly1305_256_poly1305_finish(
-  uint8_t *tag,
-  uint8_t *key,
-  Lib_IntVector_Intrinsics_vec256 *ctx
-);
-
-void Hacl_Poly1305_256_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Poly1305_256_H_DEFINED
-#endif
diff --git a/include/Hacl_Poly1305_32.h b/include/Hacl_Poly1305_32.h
deleted file mode 100644
index f3233b90..00000000
--- a/include/Hacl_Poly1305_32.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Poly1305_32_H
-#define __Hacl_Poly1305_32_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-#include "Hacl_Krmllib.h"
-
-typedef uint64_t *Hacl_Poly1305_32_poly1305_ctx;
-
-void Hacl_Poly1305_32_poly1305_init(uint64_t *ctx, uint8_t *key);
-
-void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text);
-
-void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text);
-
-void Hacl_Poly1305_32_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx);
-
-void Hacl_Poly1305_32_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Poly1305_32_H_DEFINED
-#endif
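
With both low-level Poly1305 headers gone, callers go through the consolidated MAC module instead (Hacl_MAC_Poly1305.h plus the Simd128/Simd256 variants listed in the file summary); Hacl_NaCl.h above already switched its include accordingly. Below is a minimal migration sketch for the one-shot MAC; the new entry point's name and its output-first argument order are assumptions inferred from the renamed headers, not taken verbatim from this patch.

#include <stdint.h>
#include "Hacl_MAC_Poly1305.h"   /* replaces Hacl_Poly1305_32.h */

/* Assumed prototype in the consolidated module (output buffer first):
 *   void Hacl_MAC_Poly1305_mac(uint8_t *output, uint8_t *input,
 *                              uint32_t input_len, uint8_t *key);
 */
static void tag_message(uint8_t tag[16], uint8_t *msg, uint32_t msg_len,
                        uint8_t key[32])
{
  /* Old call (removed above):
   *   Hacl_Poly1305_32_poly1305_mac(tag, msg_len, msg, key);
   * New call, assuming the renamed one-shot entry point: */
  Hacl_MAC_Poly1305_mac(tag, msg, msg_len, key);
}
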
diff --git a/include/Hacl_Streaming_Blake2.h b/include/Hacl_Streaming_Blake2.h
deleted file mode 100644
index bfb05e4f..00000000
--- a/include/Hacl_Streaming_Blake2.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Streaming_Blake2_H
-#define __Hacl_Streaming_Blake2_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-#include "Hacl_Streaming_Types.h"
-#include "Hacl_Krmllib.h"
-#include "Hacl_Hash_Blake2.h"
-
-typedef struct Hacl_Streaming_Blake2_blake2s_32_block_state_s
-{
-  uint32_t *fst;
-  uint32_t *snd;
-}
-Hacl_Streaming_Blake2_blake2s_32_block_state;
-
-typedef struct Hacl_Streaming_Blake2_blake2b_32_block_state_s
-{
-  uint64_t *fst;
-  uint64_t *snd;
-}
-Hacl_Streaming_Blake2_blake2b_32_block_state;
-
-typedef struct Hacl_Streaming_Blake2_blake2s_32_state_s
-{
-  Hacl_Streaming_Blake2_blake2s_32_block_state block_state;
-  uint8_t *buf;
-  uint64_t total_len;
-}
-Hacl_Streaming_Blake2_blake2s_32_state;
-
-typedef struct Hacl_Streaming_Blake2_blake2b_32_state_s
-{
-  Hacl_Streaming_Blake2_blake2b_32_block_state block_state;
-  uint8_t *buf;
-  uint64_t total_len;
-}
-Hacl_Streaming_Blake2_blake2b_32_state;
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2_blake2s_32_state
-*Hacl_Streaming_Blake2_blake2s_32_no_key_create_in(void);
-
-/**
-  (Re-)initialization function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2s_32_no_key_init(Hacl_Streaming_Blake2_blake2s_32_state *s1);
-
-/**
-  Update function when there is no key; 0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2_blake2s_32_no_key_update(
-  Hacl_Streaming_Blake2_blake2s_32_state *p,
-  uint8_t *data,
-  uint32_t len
-);
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2_blake2s_32_no_key_finish(
-  Hacl_Streaming_Blake2_blake2s_32_state *p,
-  uint8_t *dst
-);
-
-/**
-  Free state function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2s_32_no_key_free(Hacl_Streaming_Blake2_blake2s_32_state *s1);
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2_blake2b_32_state
-*Hacl_Streaming_Blake2_blake2b_32_no_key_create_in(void);
-
-/**
-  (Re)-initialization function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2b_32_no_key_init(Hacl_Streaming_Blake2_blake2b_32_state *s1);
-
-/**
-  Update function when there is no key; 0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2_blake2b_32_no_key_update(
-  Hacl_Streaming_Blake2_blake2b_32_state *p,
-  uint8_t *data,
-  uint32_t len
-);
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2_blake2b_32_no_key_finish(
-  Hacl_Streaming_Blake2_blake2b_32_state *p,
-  uint8_t *dst
-);
-
-/**
-  Free state function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2b_32_no_key_free(Hacl_Streaming_Blake2_blake2b_32_state *s1);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Streaming_Blake2_H_DEFINED
-#endif
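
The keyless streaming BLAKE2 API above is removed; its replacement lives in the new Hacl_Hash_Blake2b.h / Hacl_Hash_Blake2s.h headers listed in the file summary. The sketch below shows incremental BLAKE2b hashing under the assumed new naming (malloc/reset/update/digest/free); none of these names appear verbatim in this hunk, so treat them as assumptions.

#include <stdint.h>
#include "Hacl_Hash_Blake2b.h"   /* replaces Hacl_Streaming_Blake2.h (keyless case) */

/* Assumed replacement API, following the new Hacl_Hash_* convention:
 *   Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void);
 *   void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *s);
 *   Hacl_Streaming_Types_error_code
 *     Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *s, uint8_t *data, uint32_t len);
 *   void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *s, uint8_t *dst);
 *   void Hacl_Hash_Blake2b_free(Hacl_Hash_Blake2b_state_t *s);
 */
static void hash_two_chunks(uint8_t digest[64],
                            uint8_t *part1, uint32_t len1,
                            uint8_t *part2, uint32_t len2)
{
  Hacl_Hash_Blake2b_state_t *st = Hacl_Hash_Blake2b_malloc();
  Hacl_Hash_Blake2b_reset(st);                 /* was ..._no_key_init   */
  Hacl_Hash_Blake2b_update(st, part1, len1);   /* was ..._no_key_update */
  Hacl_Hash_Blake2b_update(st, part2, len2);
  Hacl_Hash_Blake2b_digest(st, digest);        /* was ..._no_key_finish */
  Hacl_Hash_Blake2b_free(st);                  /* was ..._no_key_free   */
}
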
diff --git a/include/internal/EverCrypt_HMAC.h b/include/internal/EverCrypt_HMAC.h
index 02986e6c..debea462 100644
--- a/include/internal/EverCrypt_HMAC.h
+++ b/include/internal/EverCrypt_HMAC.h
@@ -38,7 +38,9 @@ extern "C" {
 #include "internal/Hacl_Krmllib.h"
 #include "internal/Hacl_Hash_SHA2.h"
 #include "internal/Hacl_Hash_SHA1.h"
-#include "internal/Hacl_Hash_Blake2.h"
+#include "internal/Hacl_Hash_Blake2s.h"
+#include "internal/Hacl_Hash_Blake2b.h"
+#include "internal/Hacl_HMAC.h"
 #include "internal/EverCrypt_Hash.h"
 #include "../EverCrypt_HMAC.h"
 
diff --git a/include/internal/EverCrypt_Hash.h b/include/internal/EverCrypt_Hash.h
index c9417677..cd706161 100644
--- a/include/internal/EverCrypt_Hash.h
+++ b/include/internal/EverCrypt_Hash.h
@@ -41,11 +41,15 @@ extern "C" {
 #include "internal/Hacl_Hash_SHA2.h"
 #include "internal/Hacl_Hash_SHA1.h"
 #include "internal/Hacl_Hash_MD5.h"
+#include "internal/Hacl_Hash_Blake2s_Simd128.h"
+#include "internal/Hacl_Hash_Blake2s.h"
+#include "internal/Hacl_Hash_Blake2b_Simd256.h"
+#include "internal/Hacl_Hash_Blake2b.h"
 #include "../EverCrypt_Hash.h"
 
 void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n);
 
-void EverCrypt_Hash_Incremental_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void EverCrypt_Hash_Incremental_hash_256(uint8_t *output, uint8_t *input, uint32_t input_len);
 
 #if defined(__cplusplus)
 }
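
The hunk above reorders EverCrypt_Hash_Incremental_hash_256 so the 32-byte output buffer comes first. A minimal caller-side sketch of the new argument order (the prototype is copied from the hunk; the wrapper function is illustrative only):

#include <stdint.h>
#include "internal/EverCrypt_Hash.h"

static void sha256_abc(uint8_t digest[32])
{
  uint8_t msg[3] = { (uint8_t)'a', (uint8_t)'b', (uint8_t)'c' };
  /* Old order:  EverCrypt_Hash_Incremental_hash_256(msg, 3U, digest);
   * New order:  the output buffer now comes first.                   */
  EverCrypt_Hash_Incremental_hash_256(digest, msg, 3U);
}
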
diff --git a/include/internal/Hacl_Bignum25519_51.h b/include/internal/Hacl_Bignum25519_51.h
index 25a10503..4678f8a0 100644
--- a/include/internal/Hacl_Bignum25519_51.h
+++ b/include/internal/Hacl_Bignum25519_51.h
@@ -69,11 +69,11 @@ static inline void Hacl_Impl_Curve25519_Field51_fsub(uint64_t *out, uint64_t *f1
   uint64_t f23 = f2[3U];
   uint64_t f14 = f1[4U];
   uint64_t f24 = f2[4U];
-  out[0U] = f10 + (uint64_t)0x3fffffffffff68U - f20;
-  out[1U] = f11 + (uint64_t)0x3ffffffffffff8U - f21;
-  out[2U] = f12 + (uint64_t)0x3ffffffffffff8U - f22;
-  out[3U] = f13 + (uint64_t)0x3ffffffffffff8U - f23;
-  out[4U] = f14 + (uint64_t)0x3ffffffffffff8U - f24;
+  out[0U] = f10 + 0x3fffffffffff68ULL - f20;
+  out[1U] = f11 + 0x3ffffffffffff8ULL - f21;
+  out[2U] = f12 + 0x3ffffffffffff8ULL - f22;
+  out[3U] = f13 + 0x3ffffffffffff8ULL - f23;
+  out[4U] = f14 + 0x3ffffffffffff8ULL - f24;
 }
 
 static inline void
@@ -84,7 +84,7 @@ Hacl_Impl_Curve25519_Field51_fmul(
   FStar_UInt128_uint128 *uu___
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f10 = f1[0U];
   uint64_t f11 = f1[1U];
   uint64_t f12 = f1[2U];
@@ -95,10 +95,10 @@ Hacl_Impl_Curve25519_Field51_fmul(
   uint64_t f22 = f2[2U];
   uint64_t f23 = f2[3U];
   uint64_t f24 = f2[4U];
-  uint64_t tmp1 = f21 * (uint64_t)19U;
-  uint64_t tmp2 = f22 * (uint64_t)19U;
-  uint64_t tmp3 = f23 * (uint64_t)19U;
-  uint64_t tmp4 = f24 * (uint64_t)19U;
+  uint64_t tmp1 = f21 * 19ULL;
+  uint64_t tmp2 = f22 * 19ULL;
+  uint64_t tmp3 = f23 * 19ULL;
+  uint64_t tmp4 = f24 * 19ULL;
   FStar_UInt128_uint128 o00 = FStar_UInt128_mul_wide(f10, f20);
   FStar_UInt128_uint128 o10 = FStar_UInt128_mul_wide(f10, f21);
   FStar_UInt128_uint128 o20 = FStar_UInt128_mul_wide(f10, f22);
@@ -129,25 +129,24 @@ Hacl_Impl_Curve25519_Field51_fmul(
   FStar_UInt128_uint128 tmp_w2 = o24;
   FStar_UInt128_uint128 tmp_w3 = o34;
   FStar_UInt128_uint128 tmp_w4 = o44;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp01 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp01 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w1, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp11 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp11 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w2, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp21 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp21 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w3, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp31 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp31 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w4, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp41 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp01 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp41 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp01 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t o0 = tmp0_;
   uint64_t o1 = tmp11 + c5;
   uint64_t o2 = tmp21;
@@ -168,7 +167,7 @@ Hacl_Impl_Curve25519_Field51_fmul2(
   FStar_UInt128_uint128 *uu___
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f10 = f1[0U];
   uint64_t f11 = f1[1U];
   uint64_t f12 = f1[2U];
@@ -189,14 +188,14 @@ Hacl_Impl_Curve25519_Field51_fmul2(
   uint64_t f42 = f2[7U];
   uint64_t f43 = f2[8U];
   uint64_t f44 = f2[9U];
-  uint64_t tmp11 = f21 * (uint64_t)19U;
-  uint64_t tmp12 = f22 * (uint64_t)19U;
-  uint64_t tmp13 = f23 * (uint64_t)19U;
-  uint64_t tmp14 = f24 * (uint64_t)19U;
-  uint64_t tmp21 = f41 * (uint64_t)19U;
-  uint64_t tmp22 = f42 * (uint64_t)19U;
-  uint64_t tmp23 = f43 * (uint64_t)19U;
-  uint64_t tmp24 = f44 * (uint64_t)19U;
+  uint64_t tmp11 = f21 * 19ULL;
+  uint64_t tmp12 = f22 * 19ULL;
+  uint64_t tmp13 = f23 * 19ULL;
+  uint64_t tmp14 = f24 * 19ULL;
+  uint64_t tmp21 = f41 * 19ULL;
+  uint64_t tmp22 = f42 * 19ULL;
+  uint64_t tmp23 = f43 * 19ULL;
+  uint64_t tmp24 = f44 * 19ULL;
   FStar_UInt128_uint128 o00 = FStar_UInt128_mul_wide(f10, f20);
   FStar_UInt128_uint128 o15 = FStar_UInt128_mul_wide(f10, f21);
   FStar_UInt128_uint128 o25 = FStar_UInt128_mul_wide(f10, f22);
@@ -257,49 +256,47 @@ Hacl_Impl_Curve25519_Field51_fmul2(
   FStar_UInt128_uint128 tmp_w22 = o241;
   FStar_UInt128_uint128 tmp_w23 = o34;
   FStar_UInt128_uint128 tmp_w24 = o44;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(tmp_w10, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(tmp_w10, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w11, FStar_UInt128_uint64_to_uint128(c00));
-  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w12, FStar_UInt128_uint64_to_uint128(c10));
-  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w13, FStar_UInt128_uint64_to_uint128(c20));
-  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w14, FStar_UInt128_uint64_to_uint128(c30));
-  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp00 + c40 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c50 = l_4 >> (uint32_t)51U;
+  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp00 + c40 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c50 = l_4 >> 51U;
   uint64_t o100 = tmp0_;
   uint64_t o112 = tmp10 + c50;
   uint64_t o122 = tmp20;
   uint64_t o132 = tmp30;
   uint64_t o142 = tmp40;
-  FStar_UInt128_uint128
-  l_5 = FStar_UInt128_add(tmp_w20, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, (uint32_t)51U));
+  FStar_UInt128_uint128 l_5 = FStar_UInt128_add(tmp_w20, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, 51U));
   FStar_UInt128_uint128 l_6 = FStar_UInt128_add(tmp_w21, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, 51U));
   FStar_UInt128_uint128 l_7 = FStar_UInt128_add(tmp_w22, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, 51U));
   FStar_UInt128_uint128 l_8 = FStar_UInt128_add(tmp_w23, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, 51U));
   FStar_UInt128_uint128 l_9 = FStar_UInt128_add(tmp_w24, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, (uint32_t)51U));
-  uint64_t l_10 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_0 = l_10 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_10 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, 51U));
+  uint64_t l_10 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_0 = l_10 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_10 >> 51U;
   uint64_t o200 = tmp0_0;
   uint64_t o212 = tmp1 + c5;
   uint64_t o222 = tmp2;
@@ -339,25 +336,24 @@ static inline void Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f
   FStar_UInt128_uint128 tmp_w2 = FStar_UInt128_mul_wide(f2, f12);
   FStar_UInt128_uint128 tmp_w3 = FStar_UInt128_mul_wide(f2, f13);
   FStar_UInt128_uint128 tmp_w4 = FStar_UInt128_mul_wide(f2, f14);
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w1, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w2, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w3, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w4, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t o0 = tmp0_;
   uint64_t o1 = tmp1 + c5;
   uint64_t o2 = tmp2;
@@ -373,18 +369,18 @@ static inline void Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f
 static inline void
 Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___)
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f0 = f[0U];
   uint64_t f1 = f[1U];
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  uint64_t d0 = (uint64_t)2U * f0;
-  uint64_t d1 = (uint64_t)2U * f1;
-  uint64_t d2 = (uint64_t)38U * f2;
-  uint64_t d3 = (uint64_t)19U * f3;
-  uint64_t d419 = (uint64_t)19U * f4;
-  uint64_t d4 = (uint64_t)2U * d419;
+  uint64_t d0 = 2ULL * f0;
+  uint64_t d1 = 2ULL * f1;
+  uint64_t d2 = 38ULL * f2;
+  uint64_t d3 = 19ULL * f3;
+  uint64_t d419 = 19ULL * f4;
+  uint64_t d4 = 2ULL * d419;
   FStar_UInt128_uint128
   s0 =
     FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f0, f0),
@@ -415,25 +411,24 @@ Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint
   FStar_UInt128_uint128 o20 = s2;
   FStar_UInt128_uint128 o30 = s3;
   FStar_UInt128_uint128 o40 = s4;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(o00, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(o00, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(o10, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(o20, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(o30, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(o40, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t o0 = tmp0_;
   uint64_t o1 = tmp1 + c5;
   uint64_t o2 = tmp2;
@@ -449,7 +444,7 @@ Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint
 static inline void
 Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___)
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f10 = f[0U];
   uint64_t f11 = f[1U];
   uint64_t f12 = f[2U];
@@ -460,12 +455,12 @@ Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uin
   uint64_t f22 = f[7U];
   uint64_t f23 = f[8U];
   uint64_t f24 = f[9U];
-  uint64_t d00 = (uint64_t)2U * f10;
-  uint64_t d10 = (uint64_t)2U * f11;
-  uint64_t d20 = (uint64_t)38U * f12;
-  uint64_t d30 = (uint64_t)19U * f13;
-  uint64_t d4190 = (uint64_t)19U * f14;
-  uint64_t d40 = (uint64_t)2U * d4190;
+  uint64_t d00 = 2ULL * f10;
+  uint64_t d10 = 2ULL * f11;
+  uint64_t d20 = 38ULL * f12;
+  uint64_t d30 = 19ULL * f13;
+  uint64_t d4190 = 19ULL * f14;
+  uint64_t d40 = 2ULL * d4190;
   FStar_UInt128_uint128
   s00 =
     FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f10, f10),
@@ -496,12 +491,12 @@ Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uin
   FStar_UInt128_uint128 o120 = s20;
   FStar_UInt128_uint128 o130 = s30;
   FStar_UInt128_uint128 o140 = s40;
-  uint64_t d0 = (uint64_t)2U * f20;
-  uint64_t d1 = (uint64_t)2U * f21;
-  uint64_t d2 = (uint64_t)38U * f22;
-  uint64_t d3 = (uint64_t)19U * f23;
-  uint64_t d419 = (uint64_t)19U * f24;
-  uint64_t d4 = (uint64_t)2U * d419;
+  uint64_t d0 = 2ULL * f20;
+  uint64_t d1 = 2ULL * f21;
+  uint64_t d2 = 38ULL * f22;
+  uint64_t d3 = 19ULL * f23;
+  uint64_t d419 = 19ULL * f24;
+  uint64_t d4 = 2ULL * d419;
   FStar_UInt128_uint128
   s0 =
     FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f20, f20),
@@ -532,49 +527,47 @@ Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uin
   FStar_UInt128_uint128 o220 = s2;
   FStar_UInt128_uint128 o230 = s3;
   FStar_UInt128_uint128 o240 = s4;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(o100, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(o100, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(o110, FStar_UInt128_uint64_to_uint128(c00));
-  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(o120, FStar_UInt128_uint64_to_uint128(c10));
-  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(o130, FStar_UInt128_uint64_to_uint128(c20));
-  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(o140, FStar_UInt128_uint64_to_uint128(c30));
-  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp00 + c40 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c50 = l_4 >> (uint32_t)51U;
+  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp00 + c40 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c50 = l_4 >> 51U;
   uint64_t o101 = tmp0_;
   uint64_t o111 = tmp10 + c50;
   uint64_t o121 = tmp20;
   uint64_t o131 = tmp30;
   uint64_t o141 = tmp40;
-  FStar_UInt128_uint128
-  l_5 = FStar_UInt128_add(o200, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, (uint32_t)51U));
+  FStar_UInt128_uint128 l_5 = FStar_UInt128_add(o200, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, 51U));
   FStar_UInt128_uint128 l_6 = FStar_UInt128_add(o210, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, 51U));
   FStar_UInt128_uint128 l_7 = FStar_UInt128_add(o220, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, 51U));
   FStar_UInt128_uint128 l_8 = FStar_UInt128_add(o230, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, 51U));
   FStar_UInt128_uint128 l_9 = FStar_UInt128_add(o240, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, (uint32_t)51U));
-  uint64_t l_10 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_0 = l_10 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_10 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, 51U));
+  uint64_t l_10 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_0 = l_10 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_10 >> 51U;
   uint64_t o201 = tmp0_0;
   uint64_t o211 = tmp1 + c5;
   uint64_t o221 = tmp2;
@@ -609,49 +602,49 @@ static inline void Hacl_Impl_Curve25519_Field51_store_felem(uint64_t *u64s, uint
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  uint64_t l_ = f0 + (uint64_t)0U;
-  uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = l_ >> (uint32_t)51U;
+  uint64_t l_ = f0 + 0ULL;
+  uint64_t tmp0 = l_ & 0x7ffffffffffffULL;
+  uint64_t c0 = l_ >> 51U;
   uint64_t l_0 = f1 + c0;
-  uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = l_0 >> (uint32_t)51U;
+  uint64_t tmp1 = l_0 & 0x7ffffffffffffULL;
+  uint64_t c1 = l_0 >> 51U;
   uint64_t l_1 = f2 + c1;
-  uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = l_1 >> (uint32_t)51U;
+  uint64_t tmp2 = l_1 & 0x7ffffffffffffULL;
+  uint64_t c2 = l_1 >> 51U;
   uint64_t l_2 = f3 + c2;
-  uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = l_2 >> (uint32_t)51U;
+  uint64_t tmp3 = l_2 & 0x7ffffffffffffULL;
+  uint64_t c3 = l_2 >> 51U;
   uint64_t l_3 = f4 + c3;
-  uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = l_3 >> (uint32_t)51U;
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = l_3 & 0x7ffffffffffffULL;
+  uint64_t c4 = l_3 >> 51U;
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t f01 = tmp0_;
   uint64_t f11 = tmp1 + c5;
   uint64_t f21 = tmp2;
   uint64_t f31 = tmp3;
   uint64_t f41 = tmp4;
-  uint64_t m0 = FStar_UInt64_gte_mask(f01, (uint64_t)0x7ffffffffffedU);
-  uint64_t m1 = FStar_UInt64_eq_mask(f11, (uint64_t)0x7ffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(f21, (uint64_t)0x7ffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(f31, (uint64_t)0x7ffffffffffffU);
-  uint64_t m4 = FStar_UInt64_eq_mask(f41, (uint64_t)0x7ffffffffffffU);
+  uint64_t m0 = FStar_UInt64_gte_mask(f01, 0x7ffffffffffedULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(f11, 0x7ffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(f21, 0x7ffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(f31, 0x7ffffffffffffULL);
+  uint64_t m4 = FStar_UInt64_eq_mask(f41, 0x7ffffffffffffULL);
   uint64_t mask = (((m0 & m1) & m2) & m3) & m4;
-  uint64_t f0_ = f01 - (mask & (uint64_t)0x7ffffffffffedU);
-  uint64_t f1_ = f11 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f2_ = f21 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f3_ = f31 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f4_ = f41 - (mask & (uint64_t)0x7ffffffffffffU);
+  uint64_t f0_ = f01 - (mask & 0x7ffffffffffedULL);
+  uint64_t f1_ = f11 - (mask & 0x7ffffffffffffULL);
+  uint64_t f2_ = f21 - (mask & 0x7ffffffffffffULL);
+  uint64_t f3_ = f31 - (mask & 0x7ffffffffffffULL);
+  uint64_t f4_ = f41 - (mask & 0x7ffffffffffffULL);
   uint64_t f02 = f0_;
   uint64_t f12 = f1_;
   uint64_t f22 = f2_;
   uint64_t f32 = f3_;
   uint64_t f42 = f4_;
-  uint64_t o00 = f02 | f12 << (uint32_t)51U;
-  uint64_t o10 = f12 >> (uint32_t)13U | f22 << (uint32_t)38U;
-  uint64_t o20 = f22 >> (uint32_t)26U | f32 << (uint32_t)25U;
-  uint64_t o30 = f32 >> (uint32_t)39U | f42 << (uint32_t)12U;
+  uint64_t o00 = f02 | f12 << 51U;
+  uint64_t o10 = f12 >> 13U | f22 << 38U;
+  uint64_t o20 = f22 >> 26U | f32 << 25U;
+  uint64_t o30 = f32 >> 39U | f42 << 12U;
   uint64_t o0 = o00;
   uint64_t o1 = o10;
   uint64_t o2 = o20;
@@ -665,11 +658,11 @@ static inline void Hacl_Impl_Curve25519_Field51_store_felem(uint64_t *u64s, uint
 static inline void
 Hacl_Impl_Curve25519_Field51_cswap2(uint64_t bit, uint64_t *p1, uint64_t *p2)
 {
-  uint64_t mask = (uint64_t)0U - bit;
+  uint64_t mask = 0ULL - bit;
   KRML_MAYBE_FOR10(i,
-    (uint32_t)0U,
-    (uint32_t)10U,
-    (uint32_t)1U,
+    0U,
+    10U,
+    1U,
     uint64_t dummy = mask & (p1[i] ^ p2[i]);
     p1[i] = p1[i] ^ dummy;
     p2[i] = p2[i] ^ dummy;);
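
The carry chains rewritten above all follow the same radix-2^51 pattern for GF(2^255 - 19): keep the low 51 bits of each limb (mask 0x7ffffffffffff = 2^51 - 1), push the rest into the next limb, and fold the carry out of the top limb back into limb 0 multiplied by 19, since 2^255 = 19 (mod 2^255 - 19). Likewise, the fsub constants 0x3fffffffffff68 and 0x3ffffffffffff8 add 8p limb-wise so the subtraction cannot underflow. A standalone sketch of one carry pass, with no HACL* dependencies:

#include <stdint.h>

#define MASK51 0x7ffffffffffffULL   /* 2^51 - 1 */

/* One carry-propagation pass over five 51-bit limbs of an element of
 * GF(2^255 - 19). The carry leaving limb 4 sits at bit position 255, and
 * 2^255 = 19 (mod 2^255 - 19), so it re-enters limb 0 scaled by 19 -- the
 * same l_ / tmp / c chain structure as the hunks above. */
static void carry_pass_51(uint64_t f[5])
{
  uint64_t c = 0ULL;
  for (uint32_t i = 0U; i < 5U; i++)
  {
    uint64_t l = f[i] + c;
    f[i] = l & MASK51;
    c = l >> 51U;
  }
  /* Fold the top carry back: 19 * c enters at limb 0, and its own carry
   * (if any) moves on to limb 1, exactly like l_4 / tmp0_ / c5 above. */
  uint64_t l = f[0U] + 19ULL * c;
  f[0U] = l & MASK51;
  f[1U] += l >> 51U;
}
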
diff --git a/include/internal/Hacl_Bignum_Base.h b/include/internal/Hacl_Bignum_Base.h
index 2cfb0066..f2e282f4 100644
--- a/include/internal/Hacl_Bignum_Base.h
+++ b/include/internal/Hacl_Bignum_Base.h
@@ -45,7 +45,7 @@ Hacl_Bignum_Base_mul_wide_add2_u32(uint32_t a, uint32_t b, uint32_t c_in, uint32
   uint32_t out0 = out[0U];
   uint64_t res = (uint64_t)a * (uint64_t)b + (uint64_t)c_in + (uint64_t)out0;
   out[0U] = (uint32_t)res;
-  return (uint32_t)(res >> (uint32_t)32U);
+  return (uint32_t)(res >> 32U);
 }
 
 static inline uint64_t
@@ -58,22 +58,22 @@ Hacl_Bignum_Base_mul_wide_add2_u64(uint64_t a, uint64_t b, uint64_t c_in, uint64
         FStar_UInt128_uint64_to_uint128(c_in)),
       FStar_UInt128_uint64_to_uint128(out0));
   out[0U] = FStar_UInt128_uint128_to_uint64(res);
-  return FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U));
+  return FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U));
 }
 
 static inline void
 Hacl_Bignum_Convert_bn_from_bytes_be_uint64(uint32_t len, uint8_t *b, uint64_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -82,24 +82,24 @@ Hacl_Bignum_Convert_bn_from_bytes_be_uint64(uint32_t len, uint8_t *b, uint64_t *
 static inline void
 Hacl_Bignum_Convert_bn_to_bytes_be_uint64(uint32_t len, uint64_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store64_be(tmp + i * (uint32_t)8U, b[bnLen - i - (uint32_t)1U]);
+    store64_be(tmp + i * 8U, b[bnLen - i - 1U]);
   }
   memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t));
 }
 
 static inline uint32_t Hacl_Bignum_Lib_bn_get_top_index_u32(uint32_t len, uint32_t *b)
 {
-  uint32_t priv = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t priv = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
-    uint32_t mask = FStar_UInt32_eq_mask(b[i], (uint32_t)0U);
+    uint32_t mask = FStar_UInt32_eq_mask(b[i], 0U);
     priv = (mask & priv) | (~mask & i);
   }
   return priv;
@@ -107,10 +107,10 @@ static inline uint32_t Hacl_Bignum_Lib_bn_get_top_index_u32(uint32_t len, uint32
 
 static inline uint64_t Hacl_Bignum_Lib_bn_get_top_index_u64(uint32_t len, uint64_t *b)
 {
-  uint64_t priv = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t priv = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
-    uint64_t mask = FStar_UInt64_eq_mask(b[i], (uint64_t)0U);
+    uint64_t mask = FStar_UInt64_eq_mask(b[i], 0ULL);
     priv = (mask & priv) | (~mask & (uint64_t)i);
   }
   return priv;
@@ -119,63 +119,63 @@ static inline uint64_t Hacl_Bignum_Lib_bn_get_top_index_u64(uint32_t len, uint64
 static inline uint32_t
 Hacl_Bignum_Lib_bn_get_bits_u32(uint32_t len, uint32_t *b, uint32_t i, uint32_t l)
 {
-  uint32_t i1 = i / (uint32_t)32U;
-  uint32_t j = i % (uint32_t)32U;
+  uint32_t i1 = i / 32U;
+  uint32_t j = i % 32U;
   uint32_t p1 = b[i1] >> j;
   uint32_t ite;
-  if (i1 + (uint32_t)1U < len && (uint32_t)0U < j)
+  if (i1 + 1U < len && 0U < j)
   {
-    ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)32U - j);
+    ite = p1 | b[i1 + 1U] << (32U - j);
   }
   else
   {
     ite = p1;
   }
-  return ite & (((uint32_t)1U << l) - (uint32_t)1U);
+  return ite & ((1U << l) - 1U);
 }
 
 static inline uint64_t
 Hacl_Bignum_Lib_bn_get_bits_u64(uint32_t len, uint64_t *b, uint32_t i, uint32_t l)
 {
-  uint32_t i1 = i / (uint32_t)64U;
-  uint32_t j = i % (uint32_t)64U;
+  uint32_t i1 = i / 64U;
+  uint32_t j = i % 64U;
   uint64_t p1 = b[i1] >> j;
   uint64_t ite;
-  if (i1 + (uint32_t)1U < len && (uint32_t)0U < j)
+  if (i1 + 1U < len && 0U < j)
   {
-    ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)64U - j);
+    ite = p1 | b[i1 + 1U] << (64U - j);
   }
   else
   {
     ite = p1;
   }
-  return ite & (((uint64_t)1U << l) - (uint64_t)1U);
+  return ite & ((1ULL << l) - 1ULL);
 }
 
 static inline uint32_t
 Hacl_Bignum_Addition_bn_sub_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -188,27 +188,27 @@ Hacl_Bignum_Addition_bn_sub_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b,
 static inline uint64_t
 Hacl_Bignum_Addition_bn_sub_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -221,27 +221,27 @@ Hacl_Bignum_Addition_bn_sub_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b,
 static inline uint32_t
 Hacl_Bignum_Addition_bn_add_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -254,27 +254,27 @@ Hacl_Bignum_Addition_bn_add_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b,
 static inline uint64_t
 Hacl_Bignum_Addition_bn_add_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -294,27 +294,27 @@ Hacl_Bignum_Multiplication_bn_mul_u32(
 )
 {
   memset(res, 0U, (aLen + bLen) * sizeof (uint32_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bLen; i0++)
+  for (uint32_t i0 = 0U; i0 < bLen; i0++)
   {
     uint32_t bj = b[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < aLen / 4U; i++)
     {
-      uint32_t a_i = a[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = a[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c, res_i0);
-      uint32_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = a[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c, res_i1);
-      uint32_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = a[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c, res_i2);
-      uint32_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = a[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c, res_i);
     }
-    for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+    for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
     {
       uint32_t a_i = a[i];
       uint32_t *res_i = res_j + i;
@@ -335,27 +335,27 @@ Hacl_Bignum_Multiplication_bn_mul_u64(
 )
 {
   memset(res, 0U, (aLen + bLen) * sizeof (uint64_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bLen; i0++)
+  for (uint32_t i0 = 0U; i0 < bLen; i0++)
   {
     uint64_t bj = b[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < aLen / 4U; i++)
     {
-      uint64_t a_i = a[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = a[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
-    for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+    for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
     {
       uint64_t a_i = a[i];
       uint64_t *res_i = res_j + i;
@@ -370,28 +370,28 @@ static inline void
 Hacl_Bignum_Multiplication_bn_sqr_u32(uint32_t aLen, uint32_t *a, uint32_t *res)
 {
   memset(res, 0U, (aLen + aLen) * sizeof (uint32_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < aLen; i0++)
+  for (uint32_t i0 = 0U; i0 < aLen; i0++)
   {
     uint32_t *ab = a;
     uint32_t a_j = a[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint32_t a_i = ab[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = ab[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i0);
-      uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = ab[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c, res_i1);
-      uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = ab[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c, res_i2);
-      uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = ab[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint32_t a_i = ab[i];
       uint32_t *res_i = res_j + i;
@@ -401,48 +401,48 @@ Hacl_Bignum_Multiplication_bn_sqr_u32(uint32_t aLen, uint32_t *a, uint32_t *res)
     res[i0 + i0] = r;
   }
   uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  KRML_MAYBE_UNUSED_VAR(c0);
   KRML_CHECK_SIZE(sizeof (uint32_t), aLen + aLen);
   uint32_t tmp[aLen + aLen];
   memset(tmp, 0U, (aLen + aLen) * sizeof (uint32_t));
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     uint64_t res1 = (uint64_t)a[i] * (uint64_t)a[i];
-    uint32_t hi = (uint32_t)(res1 >> (uint32_t)32U);
+    uint32_t hi = (uint32_t)(res1 >> 32U);
     uint32_t lo = (uint32_t)res1;
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;
   }
   uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void
 Hacl_Bignum_Multiplication_bn_sqr_u64(uint32_t aLen, uint64_t *a, uint64_t *res)
 {
   memset(res, 0U, (aLen + aLen) * sizeof (uint64_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < aLen; i0++)
+  for (uint32_t i0 = 0U; i0 < aLen; i0++)
   {
     uint64_t *ab = a;
     uint64_t a_j = a[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -452,20 +452,20 @@ Hacl_Bignum_Multiplication_bn_sqr_u64(uint32_t aLen, uint64_t *a, uint64_t *res)
     res[i0 + i0] = r;
   }
   uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  KRML_MAYBE_UNUSED_VAR(c0);
   KRML_CHECK_SIZE(sizeof (uint64_t), aLen + aLen);
   uint64_t tmp[aLen + aLen];
   memset(tmp, 0U, (aLen + aLen) * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;
   }
   uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 #if defined(__cplusplus)
diff --git a/include/internal/Hacl_Bignum_K256.h b/include/internal/Hacl_Bignum_K256.h
index 59aff176..fe72fffe 100644
--- a/include/internal/Hacl_Bignum_K256.h
+++ b/include/internal/Hacl_Bignum_K256.h
@@ -45,13 +45,7 @@ static inline bool Hacl_K256_Field_is_felem_zero_vartime(uint64_t *f)
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  return
-    f0
-    == (uint64_t)0U
-    && f1 == (uint64_t)0U
-    && f2 == (uint64_t)0U
-    && f3 == (uint64_t)0U
-    && f4 == (uint64_t)0U;
+  return f0 == 0ULL && f1 == 0ULL && f2 == 0ULL && f3 == 0ULL && f4 == 0ULL;
 }
 
 static inline bool Hacl_K256_Field_is_felem_eq_vartime(uint64_t *f1, uint64_t *f2)
@@ -76,42 +70,42 @@ static inline bool Hacl_K256_Field_is_felem_lt_prime_minus_order_vartime(uint64_
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  if (f4 > (uint64_t)0U)
+  if (f4 > 0ULL)
   {
     return false;
   }
-  if (f3 > (uint64_t)0U)
+  if (f3 > 0ULL)
   {
     return false;
   }
-  if (f2 < (uint64_t)0x1455123U)
+  if (f2 < 0x1455123ULL)
   {
     return true;
   }
-  if (f2 > (uint64_t)0x1455123U)
+  if (f2 > 0x1455123ULL)
   {
     return false;
   }
-  if (f1 < (uint64_t)0x1950b75fc4402U)
+  if (f1 < 0x1950b75fc4402ULL)
   {
     return true;
   }
-  if (f1 > (uint64_t)0x1950b75fc4402U)
+  if (f1 > 0x1950b75fc4402ULL)
   {
     return false;
   }
-  return f0 < (uint64_t)0xda1722fc9baeeU;
+  return f0 < 0xda1722fc9baeeULL;
 }
 
 static inline void Hacl_K256_Field_load_felem(uint64_t *f, uint8_t *b)
 {
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = tmp;
-    uint8_t *bj = b + i * (uint32_t)8U;
+    uint8_t *bj = b + i * 8U;
     uint64_t u = load64_be(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -120,11 +114,11 @@ static inline void Hacl_K256_Field_load_felem(uint64_t *f, uint8_t *b)
   uint64_t s1 = tmp[2U];
   uint64_t s2 = tmp[1U];
   uint64_t s3 = tmp[0U];
-  uint64_t f00 = s0 & (uint64_t)0xfffffffffffffU;
-  uint64_t f10 = s0 >> (uint32_t)52U | (s1 & (uint64_t)0xffffffffffU) << (uint32_t)12U;
-  uint64_t f20 = s1 >> (uint32_t)40U | (s2 & (uint64_t)0xfffffffU) << (uint32_t)24U;
-  uint64_t f30 = s2 >> (uint32_t)28U | (s3 & (uint64_t)0xffffU) << (uint32_t)36U;
-  uint64_t f40 = s3 >> (uint32_t)16U;
+  uint64_t f00 = s0 & 0xfffffffffffffULL;
+  uint64_t f10 = s0 >> 52U | (s1 & 0xffffffffffULL) << 12U;
+  uint64_t f20 = s1 >> 40U | (s2 & 0xfffffffULL) << 24U;
+  uint64_t f30 = s2 >> 28U | (s3 & 0xffffULL) << 36U;
+  uint64_t f40 = s3 >> 16U;
   uint64_t f0 = f00;
   uint64_t f1 = f10;
   uint64_t f2 = f20;
@@ -148,11 +142,11 @@ static inline bool Hacl_K256_Field_load_felem_lt_prime_vartime(uint64_t *f, uint
   bool
   is_ge_p =
     f0
-    >= (uint64_t)0xffffefffffc2fU
-    && f1 == (uint64_t)0xfffffffffffffU
-    && f2 == (uint64_t)0xfffffffffffffU
-    && f3 == (uint64_t)0xfffffffffffffU
-    && f4 == (uint64_t)0xffffffffffffU;
+    >= 0xffffefffffc2fULL
+    && f1 == 0xfffffffffffffULL
+    && f2 == 0xfffffffffffffULL
+    && f3 == 0xfffffffffffffULL
+    && f4 == 0xffffffffffffULL;
   return !is_ge_p;
 }
 
@@ -164,10 +158,10 @@ static inline void Hacl_K256_Field_store_felem(uint8_t *b, uint64_t *f)
   uint64_t f20 = f[2U];
   uint64_t f30 = f[3U];
   uint64_t f4 = f[4U];
-  uint64_t o0 = f00 | f10 << (uint32_t)52U;
-  uint64_t o1 = f10 >> (uint32_t)12U | f20 << (uint32_t)40U;
-  uint64_t o2 = f20 >> (uint32_t)24U | f30 << (uint32_t)28U;
-  uint64_t o3 = f30 >> (uint32_t)36U | f4 << (uint32_t)16U;
+  uint64_t o0 = f00 | f10 << 52U;
+  uint64_t o1 = f10 >> 12U | f20 << 40U;
+  uint64_t o2 = f20 >> 24U | f30 << 28U;
+  uint64_t o3 = f30 >> 36U | f4 << 16U;
   uint64_t f0 = o0;
   uint64_t f1 = o1;
   uint64_t f2 = o2;
@@ -176,11 +170,7 @@ static inline void Hacl_K256_Field_store_felem(uint8_t *b, uint64_t *f)
   tmp[1U] = f2;
   tmp[2U] = f1;
   tmp[3U] = f0;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(b + i * (uint32_t)8U, tmp[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(b + i * 8U, tmp[i]););
 }
 
 static inline void Hacl_K256_Field_fmul_small_num(uint64_t *out, uint64_t *f, uint64_t num)
@@ -248,11 +238,11 @@ static inline void Hacl_K256_Field_fsub(uint64_t *out, uint64_t *f1, uint64_t *f
   uint64_t b2 = f2[2U];
   uint64_t b3 = f2[3U];
   uint64_t b4 = f2[4U];
-  uint64_t r00 = (uint64_t)9007190664804446U * x - b0;
-  uint64_t r10 = (uint64_t)9007199254740990U * x - b1;
-  uint64_t r20 = (uint64_t)9007199254740990U * x - b2;
-  uint64_t r30 = (uint64_t)9007199254740990U * x - b3;
-  uint64_t r40 = (uint64_t)562949953421310U * x - b4;
+  uint64_t r00 = 9007190664804446ULL * x - b0;
+  uint64_t r10 = 9007199254740990ULL * x - b1;
+  uint64_t r20 = 9007199254740990ULL * x - b2;
+  uint64_t r30 = 9007199254740990ULL * x - b3;
+  uint64_t r40 = 562949953421310ULL * x - b4;
   uint64_t r0 = r00;
   uint64_t r1 = r10;
   uint64_t r2 = r20;
@@ -287,7 +277,7 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
   uint64_t b2 = f2[2U];
   uint64_t b3 = f2[3U];
   uint64_t b4 = f2[4U];
-  uint64_t r = (uint64_t)0x1000003D10U;
+  uint64_t r = 0x1000003D10ULL;
   FStar_UInt128_uint128
   d0 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0,
@@ -298,9 +288,9 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
   FStar_UInt128_uint128 c0 = FStar_UInt128_mul_wide(a4, b4);
   FStar_UInt128_uint128
   d1 = FStar_UInt128_add_mod(d0, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(c0)));
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, (uint32_t)64U));
-  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, (uint32_t)52U);
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, 64U));
+  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, 52U);
   FStar_UInt128_uint128
   d3 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(d2,
@@ -309,12 +299,11 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
           FStar_UInt128_mul_wide(a2, b2)),
         FStar_UInt128_mul_wide(a3, b1)),
       FStar_UInt128_mul_wide(a4, b0));
-  FStar_UInt128_uint128
-  d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << (uint32_t)12U, c1));
-  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, (uint32_t)52U);
-  uint64_t tx = t4 >> (uint32_t)48U;
-  uint64_t t4_ = t4 & (uint64_t)0xffffffffffffU;
+  FStar_UInt128_uint128 d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << 12U, c1));
+  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, 52U);
+  uint64_t tx = t4 >> 48U;
+  uint64_t t4_ = t4 & 0xffffffffffffULL;
   FStar_UInt128_uint128 c2 = FStar_UInt128_mul_wide(a0, b0);
   FStar_UInt128_uint128
   d6 =
@@ -323,13 +312,12 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
           FStar_UInt128_mul_wide(a2, b3)),
         FStar_UInt128_mul_wide(a3, b2)),
       FStar_UInt128_mul_wide(a4, b1));
-  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, (uint32_t)52U);
-  uint64_t u0_ = tx | u0 << (uint32_t)4U;
-  FStar_UInt128_uint128
-  c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> (uint32_t)4U));
-  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, (uint32_t)52U);
+  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, 52U);
+  uint64_t u0_ = tx | u0 << 4U;
+  FStar_UInt128_uint128 c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> 4U));
+  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, 52U);
   FStar_UInt128_uint128
   c5 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(c4, FStar_UInt128_mul_wide(a0, b1)),
@@ -343,10 +331,10 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
   FStar_UInt128_uint128
   c6 =
     FStar_UInt128_add_mod(c5,
-      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & (uint64_t)0xfffffffffffffU, r));
-  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, (uint32_t)52U);
-  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, (uint32_t)52U);
+      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & 0xfffffffffffffULL, r));
+  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, 52U);
+  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, 52U);
   FStar_UInt128_uint128
   c8 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(c7,
@@ -359,16 +347,15 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
       FStar_UInt128_mul_wide(a4, b3));
   FStar_UInt128_uint128
   c9 = FStar_UInt128_add_mod(c8, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(d10)));
-  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, (uint32_t)64U));
-  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, (uint32_t)52U);
+  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, 64U));
+  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, 52U);
   FStar_UInt128_uint128
   c11 =
-    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10,
-        FStar_UInt128_mul_wide(r << (uint32_t)12U, d11)),
+    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10, FStar_UInt128_mul_wide(r << 12U, d11)),
       FStar_UInt128_uint64_to_uint128(t3));
-  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & (uint64_t)0xfffffffffffffU;
-  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, (uint32_t)52U));
+  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & 0xfffffffffffffULL;
+  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, 52U));
   uint64_t r4 = c12 + t4_;
   uint64_t f0 = r0;
   uint64_t f11 = r1;
@@ -389,43 +376,41 @@ static inline void Hacl_K256_Field_fsqr(uint64_t *out, uint64_t *f)
   uint64_t a2 = f[2U];
   uint64_t a3 = f[3U];
   uint64_t a4 = f[4U];
-  uint64_t r = (uint64_t)0x1000003D10U;
+  uint64_t r = 0x1000003D10ULL;
   FStar_UInt128_uint128
   d0 =
-    FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0 * (uint64_t)2U, a3),
-      FStar_UInt128_mul_wide(a1 * (uint64_t)2U, a2));
+    FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0 * 2ULL, a3),
+      FStar_UInt128_mul_wide(a1 * 2ULL, a2));
   FStar_UInt128_uint128 c0 = FStar_UInt128_mul_wide(a4, a4);
   FStar_UInt128_uint128
   d1 = FStar_UInt128_add_mod(d0, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(c0)));
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, (uint32_t)64U));
-  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, (uint32_t)52U);
-  uint64_t a41 = a4 * (uint64_t)2U;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, 64U));
+  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, 52U);
+  uint64_t a41 = a4 * 2ULL;
   FStar_UInt128_uint128
   d3 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(d2,
           FStar_UInt128_mul_wide(a0, a41)),
-        FStar_UInt128_mul_wide(a1 * (uint64_t)2U, a3)),
+        FStar_UInt128_mul_wide(a1 * 2ULL, a3)),
       FStar_UInt128_mul_wide(a2, a2));
-  FStar_UInt128_uint128
-  d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << (uint32_t)12U, c1));
-  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, (uint32_t)52U);
-  uint64_t tx = t4 >> (uint32_t)48U;
-  uint64_t t4_ = t4 & (uint64_t)0xffffffffffffU;
+  FStar_UInt128_uint128 d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << 12U, c1));
+  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, 52U);
+  uint64_t tx = t4 >> 48U;
+  uint64_t t4_ = t4 & 0xffffffffffffULL;
   FStar_UInt128_uint128 c2 = FStar_UInt128_mul_wide(a0, a0);
   FStar_UInt128_uint128
   d6 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(d5, FStar_UInt128_mul_wide(a1, a41)),
-      FStar_UInt128_mul_wide(a2 * (uint64_t)2U, a3));
-  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, (uint32_t)52U);
-  uint64_t u0_ = tx | u0 << (uint32_t)4U;
-  FStar_UInt128_uint128
-  c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> (uint32_t)4U));
-  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, (uint32_t)52U);
-  uint64_t a01 = a0 * (uint64_t)2U;
+      FStar_UInt128_mul_wide(a2 * 2ULL, a3));
+  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, 52U);
+  uint64_t u0_ = tx | u0 << 4U;
+  FStar_UInt128_uint128 c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> 4U));
+  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, 52U);
+  uint64_t a01 = a0 * 2ULL;
   FStar_UInt128_uint128 c5 = FStar_UInt128_add_mod(c4, FStar_UInt128_mul_wide(a01, a1));
   FStar_UInt128_uint128
   d8 =
@@ -434,10 +419,10 @@ static inline void Hacl_K256_Field_fsqr(uint64_t *out, uint64_t *f)
   FStar_UInt128_uint128
   c6 =
     FStar_UInt128_add_mod(c5,
-      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & (uint64_t)0xfffffffffffffU, r));
-  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, (uint32_t)52U);
-  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, (uint32_t)52U);
+      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & 0xfffffffffffffULL, r));
+  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, 52U);
+  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, 52U);
   FStar_UInt128_uint128
   c8 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(c7, FStar_UInt128_mul_wide(a01, a2)),
@@ -445,16 +430,15 @@ static inline void Hacl_K256_Field_fsqr(uint64_t *out, uint64_t *f)
   FStar_UInt128_uint128 d10 = FStar_UInt128_add_mod(d9, FStar_UInt128_mul_wide(a3, a41));
   FStar_UInt128_uint128
   c9 = FStar_UInt128_add_mod(c8, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(d10)));
-  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, (uint32_t)64U));
-  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, (uint32_t)52U);
+  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, 64U));
+  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, 52U);
   FStar_UInt128_uint128
   c11 =
-    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10,
-        FStar_UInt128_mul_wide(r << (uint32_t)12U, d11)),
+    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10, FStar_UInt128_mul_wide(r << 12U, d11)),
       FStar_UInt128_uint64_to_uint128(t3));
-  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & (uint64_t)0xfffffffffffffU;
-  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, (uint32_t)52U));
+  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & 0xfffffffffffffULL;
+  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, 52U));
   uint64_t r4 = c12 + t4_;
   uint64_t f0 = r0;
   uint64_t f1 = r1;
@@ -475,23 +459,23 @@ static inline void Hacl_K256_Field_fnormalize_weak(uint64_t *out, uint64_t *f)
   uint64_t t2 = f[2U];
   uint64_t t3 = f[3U];
   uint64_t t4 = f[4U];
-  uint64_t x0 = t4 >> (uint32_t)48U;
-  uint64_t t410 = t4 & (uint64_t)0xffffffffffffU;
+  uint64_t x0 = t4 >> 48U;
+  uint64_t t410 = t4 & 0xffffffffffffULL;
   uint64_t x = x0;
   uint64_t t01 = t0;
   uint64_t t11 = t1;
   uint64_t t21 = t2;
   uint64_t t31 = t3;
   uint64_t t41 = t410;
-  uint64_t t02 = t01 + x * (uint64_t)0x1000003D1U;
-  uint64_t t12 = t11 + (t02 >> (uint32_t)52U);
-  uint64_t t03 = t02 & (uint64_t)0xfffffffffffffU;
-  uint64_t t22 = t21 + (t12 >> (uint32_t)52U);
-  uint64_t t13 = t12 & (uint64_t)0xfffffffffffffU;
-  uint64_t t32 = t31 + (t22 >> (uint32_t)52U);
-  uint64_t t23 = t22 & (uint64_t)0xfffffffffffffU;
-  uint64_t t42 = t41 + (t32 >> (uint32_t)52U);
-  uint64_t t33 = t32 & (uint64_t)0xfffffffffffffU;
+  uint64_t t02 = t01 + x * 0x1000003D1ULL;
+  uint64_t t12 = t11 + (t02 >> 52U);
+  uint64_t t03 = t02 & 0xfffffffffffffULL;
+  uint64_t t22 = t21 + (t12 >> 52U);
+  uint64_t t13 = t12 & 0xfffffffffffffULL;
+  uint64_t t32 = t31 + (t22 >> 52U);
+  uint64_t t23 = t22 & 0xfffffffffffffULL;
+  uint64_t t42 = t41 + (t32 >> 52U);
+  uint64_t t33 = t32 & 0xfffffffffffffULL;
   uint64_t f0 = t03;
   uint64_t f1 = t13;
   uint64_t f2 = t23;
@@ -511,59 +495,59 @@ static inline void Hacl_K256_Field_fnormalize(uint64_t *out, uint64_t *f)
   uint64_t f20 = f[2U];
   uint64_t f30 = f[3U];
   uint64_t f40 = f[4U];
-  uint64_t x0 = f40 >> (uint32_t)48U;
-  uint64_t t40 = f40 & (uint64_t)0xffffffffffffU;
+  uint64_t x0 = f40 >> 48U;
+  uint64_t t40 = f40 & 0xffffffffffffULL;
   uint64_t x1 = x0;
   uint64_t t00 = f00;
   uint64_t t10 = f10;
   uint64_t t20 = f20;
   uint64_t t30 = f30;
   uint64_t t42 = t40;
-  uint64_t t01 = t00 + x1 * (uint64_t)0x1000003D1U;
-  uint64_t t110 = t10 + (t01 >> (uint32_t)52U);
-  uint64_t t020 = t01 & (uint64_t)0xfffffffffffffU;
-  uint64_t t210 = t20 + (t110 >> (uint32_t)52U);
-  uint64_t t120 = t110 & (uint64_t)0xfffffffffffffU;
-  uint64_t t310 = t30 + (t210 >> (uint32_t)52U);
-  uint64_t t220 = t210 & (uint64_t)0xfffffffffffffU;
-  uint64_t t410 = t42 + (t310 >> (uint32_t)52U);
-  uint64_t t320 = t310 & (uint64_t)0xfffffffffffffU;
+  uint64_t t01 = t00 + x1 * 0x1000003D1ULL;
+  uint64_t t110 = t10 + (t01 >> 52U);
+  uint64_t t020 = t01 & 0xfffffffffffffULL;
+  uint64_t t210 = t20 + (t110 >> 52U);
+  uint64_t t120 = t110 & 0xfffffffffffffULL;
+  uint64_t t310 = t30 + (t210 >> 52U);
+  uint64_t t220 = t210 & 0xfffffffffffffULL;
+  uint64_t t410 = t42 + (t310 >> 52U);
+  uint64_t t320 = t310 & 0xfffffffffffffULL;
   uint64_t t0 = t020;
   uint64_t t1 = t120;
   uint64_t t2 = t220;
   uint64_t t3 = t320;
   uint64_t t4 = t410;
-  uint64_t x2 = t4 >> (uint32_t)48U;
-  uint64_t t411 = t4 & (uint64_t)0xffffffffffffU;
+  uint64_t x2 = t4 >> 48U;
+  uint64_t t411 = t4 & 0xffffffffffffULL;
   uint64_t x = x2;
   uint64_t r0 = t0;
   uint64_t r1 = t1;
   uint64_t r2 = t2;
   uint64_t r3 = t3;
   uint64_t r4 = t411;
-  uint64_t m4 = FStar_UInt64_eq_mask(r4, (uint64_t)0xffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(r3, (uint64_t)0xfffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(r2, (uint64_t)0xfffffffffffffU);
-  uint64_t m1 = FStar_UInt64_eq_mask(r1, (uint64_t)0xfffffffffffffU);
-  uint64_t m0 = FStar_UInt64_gte_mask(r0, (uint64_t)0xffffefffffc2fU);
+  uint64_t m4 = FStar_UInt64_eq_mask(r4, 0xffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(r3, 0xfffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(r2, 0xfffffffffffffULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(r1, 0xfffffffffffffULL);
+  uint64_t m0 = FStar_UInt64_gte_mask(r0, 0xffffefffffc2fULL);
   uint64_t is_ge_p_m = (((m0 & m1) & m2) & m3) & m4;
-  uint64_t m_to_one = is_ge_p_m & (uint64_t)1U;
+  uint64_t m_to_one = is_ge_p_m & 1ULL;
   uint64_t x10 = m_to_one | x;
-  uint64_t t010 = r0 + x10 * (uint64_t)0x1000003D1U;
-  uint64_t t11 = r1 + (t010 >> (uint32_t)52U);
-  uint64_t t02 = t010 & (uint64_t)0xfffffffffffffU;
-  uint64_t t21 = r2 + (t11 >> (uint32_t)52U);
-  uint64_t t12 = t11 & (uint64_t)0xfffffffffffffU;
-  uint64_t t31 = r3 + (t21 >> (uint32_t)52U);
-  uint64_t t22 = t21 & (uint64_t)0xfffffffffffffU;
-  uint64_t t41 = r4 + (t31 >> (uint32_t)52U);
-  uint64_t t32 = t31 & (uint64_t)0xfffffffffffffU;
+  uint64_t t010 = r0 + x10 * 0x1000003D1ULL;
+  uint64_t t11 = r1 + (t010 >> 52U);
+  uint64_t t02 = t010 & 0xfffffffffffffULL;
+  uint64_t t21 = r2 + (t11 >> 52U);
+  uint64_t t12 = t11 & 0xfffffffffffffULL;
+  uint64_t t31 = r3 + (t21 >> 52U);
+  uint64_t t22 = t21 & 0xfffffffffffffULL;
+  uint64_t t41 = r4 + (t31 >> 52U);
+  uint64_t t32 = t31 & 0xfffffffffffffULL;
   uint64_t s0 = t02;
   uint64_t s1 = t12;
   uint64_t s2 = t22;
   uint64_t s3 = t32;
   uint64_t s4 = t41;
-  uint64_t t412 = s4 & (uint64_t)0xffffffffffffU;
+  uint64_t t412 = s4 & 0xffffffffffffULL;
   uint64_t k0 = s0;
   uint64_t k1 = s1;
   uint64_t k2 = s2;
@@ -590,11 +574,11 @@ static inline void Hacl_K256_Field_fnegate_conditional_vartime(uint64_t *f, bool
     uint64_t a2 = f[2U];
     uint64_t a3 = f[3U];
     uint64_t a4 = f[4U];
-    uint64_t r0 = (uint64_t)9007190664804446U - a0;
-    uint64_t r1 = (uint64_t)9007199254740990U - a1;
-    uint64_t r2 = (uint64_t)9007199254740990U - a2;
-    uint64_t r3 = (uint64_t)9007199254740990U - a3;
-    uint64_t r4 = (uint64_t)562949953421310U - a4;
+    uint64_t r0 = 9007190664804446ULL - a0;
+    uint64_t r1 = 9007199254740990ULL - a1;
+    uint64_t r2 = 9007199254740990ULL - a2;
+    uint64_t r3 = 9007199254740990ULL - a3;
+    uint64_t r4 = 562949953421310ULL - a4;
     uint64_t f0 = r0;
     uint64_t f1 = r1;
     uint64_t f2 = r2;
@@ -612,7 +596,7 @@ static inline void Hacl_K256_Field_fnegate_conditional_vartime(uint64_t *f, bool
 
 static inline void Hacl_Impl_K256_Finv_fsquare_times_in_place(uint64_t *out, uint32_t b)
 {
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  for (uint32_t i = 0U; i < b; i++)
   {
     Hacl_K256_Field_fsqr(out, out);
   }
@@ -620,8 +604,8 @@ static inline void Hacl_Impl_K256_Finv_fsquare_times_in_place(uint64_t *out, uin
 
 static inline void Hacl_Impl_K256_Finv_fsquare_times(uint64_t *out, uint64_t *a, uint32_t b)
 {
-  memcpy(out, a, (uint32_t)5U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  memcpy(out, a, 5U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < b; i++)
   {
     Hacl_K256_Field_fsqr(out, out);
   }
@@ -633,29 +617,29 @@ static inline void Hacl_Impl_K256_Finv_fexp_223_23(uint64_t *out, uint64_t *x2,
   uint64_t x22[5U] = { 0U };
   uint64_t x44[5U] = { 0U };
   uint64_t x88[5U] = { 0U };
-  Hacl_Impl_K256_Finv_fsquare_times(x2, f, (uint32_t)1U);
+  Hacl_Impl_K256_Finv_fsquare_times(x2, f, 1U);
   Hacl_K256_Field_fmul(x2, x2, f);
-  Hacl_Impl_K256_Finv_fsquare_times(x3, x2, (uint32_t)1U);
+  Hacl_Impl_K256_Finv_fsquare_times(x3, x2, 1U);
   Hacl_K256_Field_fmul(x3, x3, f);
-  Hacl_Impl_K256_Finv_fsquare_times(out, x3, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times(out, x3, 3U);
   Hacl_K256_Field_fmul(out, out, x3);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U);
   Hacl_K256_Field_fmul(out, out, x3);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U);
   Hacl_K256_Field_fmul(out, out, x2);
-  Hacl_Impl_K256_Finv_fsquare_times(x22, out, (uint32_t)11U);
+  Hacl_Impl_K256_Finv_fsquare_times(x22, out, 11U);
   Hacl_K256_Field_fmul(x22, x22, out);
-  Hacl_Impl_K256_Finv_fsquare_times(x44, x22, (uint32_t)22U);
+  Hacl_Impl_K256_Finv_fsquare_times(x44, x22, 22U);
   Hacl_K256_Field_fmul(x44, x44, x22);
-  Hacl_Impl_K256_Finv_fsquare_times(x88, x44, (uint32_t)44U);
+  Hacl_Impl_K256_Finv_fsquare_times(x88, x44, 44U);
   Hacl_K256_Field_fmul(x88, x88, x44);
-  Hacl_Impl_K256_Finv_fsquare_times(out, x88, (uint32_t)88U);
+  Hacl_Impl_K256_Finv_fsquare_times(out, x88, 88U);
   Hacl_K256_Field_fmul(out, out, x88);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)44U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 44U);
   Hacl_K256_Field_fmul(out, out, x44);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U);
   Hacl_K256_Field_fmul(out, out, x3);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)23U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 23U);
   Hacl_K256_Field_fmul(out, out, x22);
 }
 
@@ -663,11 +647,11 @@ static inline void Hacl_Impl_K256_Finv_finv(uint64_t *out, uint64_t *f)
 {
   uint64_t x2[5U] = { 0U };
   Hacl_Impl_K256_Finv_fexp_223_23(out, x2, f);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)5U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 5U);
   Hacl_K256_Field_fmul(out, out, f);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U);
   Hacl_K256_Field_fmul(out, out, x2);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U);
   Hacl_K256_Field_fmul(out, out, f);
 }
 
@@ -675,9 +659,9 @@ static inline void Hacl_Impl_K256_Finv_fsqrt(uint64_t *out, uint64_t *f)
 {
   uint64_t x2[5U] = { 0U };
   Hacl_Impl_K256_Finv_fexp_223_23(out, x2, f);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)6U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 6U);
   Hacl_K256_Field_fmul(out, out, x2);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U);
 }
 
 #if defined(__cplusplus)
diff --git a/include/internal/Hacl_Ed25519_PrecompTable.h b/include/internal/Hacl_Ed25519_PrecompTable.h
index 77d2244c..a20cd912 100644
--- a/include/internal/Hacl_Ed25519_PrecompTable.h
+++ b/include/internal/Hacl_Ed25519_PrecompTable.h
@@ -39,655 +39,491 @@ static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)1738742601995546U, (uint64_t)1146398526822698U,
-    (uint64_t)2070867633025821U, (uint64_t)562264141797630U, (uint64_t)587772402128613U,
-    (uint64_t)1801439850948184U, (uint64_t)1351079888211148U, (uint64_t)450359962737049U,
-    (uint64_t)900719925474099U, (uint64_t)1801439850948198U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1841354044333475U,
-    (uint64_t)16398895984059U, (uint64_t)755974180946558U, (uint64_t)900171276175154U,
-    (uint64_t)1821297809914039U, (uint64_t)1661154287933054U, (uint64_t)284530020860578U,
-    (uint64_t)1390261174866914U, (uint64_t)1524110943907984U, (uint64_t)1045603498418422U,
-    (uint64_t)928651508580478U, (uint64_t)1383326941296346U, (uint64_t)961937908925785U,
-    (uint64_t)80455759693706U, (uint64_t)904734540352947U, (uint64_t)1507481815385608U,
-    (uint64_t)2223447444246085U, (uint64_t)1083941587175919U, (uint64_t)2059929906842505U,
-    (uint64_t)1581435440146976U, (uint64_t)782730187692425U, (uint64_t)9928394897574U,
-    (uint64_t)1539449519985236U, (uint64_t)1923587931078510U, (uint64_t)552919286076056U,
-    (uint64_t)376925408065760U, (uint64_t)447320488831784U, (uint64_t)1362918338468019U,
-    (uint64_t)1470031896696846U, (uint64_t)2189796996539902U, (uint64_t)1337552949959847U,
-    (uint64_t)1762287177775726U, (uint64_t)237994495816815U, (uint64_t)1277840395970544U,
-    (uint64_t)543972849007241U, (uint64_t)1224692671618814U, (uint64_t)162359533289271U,
-    (uint64_t)282240927125249U, (uint64_t)586909166382289U, (uint64_t)17726488197838U,
-    (uint64_t)377014554985659U, (uint64_t)1433835303052512U, (uint64_t)702061469493692U,
-    (uint64_t)1142253108318154U, (uint64_t)318297794307551U, (uint64_t)954362646308543U,
-    (uint64_t)517363881452320U, (uint64_t)1868013482130416U, (uint64_t)262562472373260U,
-    (uint64_t)902232853249919U, (uint64_t)2107343057055746U, (uint64_t)462368348619024U,
-    (uint64_t)1893758677092974U, (uint64_t)2177729767846389U, (uint64_t)2168532543559143U,
-    (uint64_t)443867094639821U, (uint64_t)730169342581022U, (uint64_t)1564589016879755U,
-    (uint64_t)51218195700649U, (uint64_t)76684578423745U, (uint64_t)560266272480743U,
-    (uint64_t)922517457707697U, (uint64_t)2066645939860874U, (uint64_t)1318277348414638U,
-    (uint64_t)1576726809084003U, (uint64_t)1817337608563665U, (uint64_t)1874240939237666U,
-    (uint64_t)754733726333910U, (uint64_t)97085310406474U, (uint64_t)751148364309235U,
-    (uint64_t)1622159695715187U, (uint64_t)1444098819684916U, (uint64_t)130920805558089U,
-    (uint64_t)1260449179085308U, (uint64_t)1860021740768461U, (uint64_t)110052860348509U,
-    (uint64_t)193830891643810U, (uint64_t)164148413933881U, (uint64_t)180017794795332U,
-    (uint64_t)1523506525254651U, (uint64_t)465981629225956U, (uint64_t)559733514964572U,
-    (uint64_t)1279624874416974U, (uint64_t)2026642326892306U, (uint64_t)1425156829982409U,
-    (uint64_t)2160936383793147U, (uint64_t)1061870624975247U, (uint64_t)2023497043036941U,
-    (uint64_t)117942212883190U, (uint64_t)490339622800774U, (uint64_t)1729931303146295U,
-    (uint64_t)422305932971074U, (uint64_t)529103152793096U, (uint64_t)1211973233775992U,
-    (uint64_t)721364955929681U, (uint64_t)1497674430438813U, (uint64_t)342545521275073U,
-    (uint64_t)2102107575279372U, (uint64_t)2108462244669966U, (uint64_t)1382582406064082U,
-    (uint64_t)2206396818383323U, (uint64_t)2109093268641147U, (uint64_t)10809845110983U,
-    (uint64_t)1605176920880099U, (uint64_t)744640650753946U, (uint64_t)1712758897518129U,
-    (uint64_t)373410811281809U, (uint64_t)648838265800209U, (uint64_t)813058095530999U,
-    (uint64_t)513987632620169U, (uint64_t)465516160703329U, (uint64_t)2136322186126330U,
-    (uint64_t)1979645899422932U, (uint64_t)1197131006470786U, (uint64_t)1467836664863979U,
-    (uint64_t)1340751381374628U, (uint64_t)1810066212667962U, (uint64_t)1009933588225499U,
-    (uint64_t)1106129188080873U, (uint64_t)1388980405213901U, (uint64_t)533719246598044U,
-    (uint64_t)1169435803073277U, (uint64_t)198920999285821U, (uint64_t)487492330629854U,
-    (uint64_t)1807093008537778U, (uint64_t)1540899012923865U, (uint64_t)2075080271659867U,
-    (uint64_t)1527990806921523U, (uint64_t)1323728742908002U, (uint64_t)1568595959608205U,
-    (uint64_t)1388032187497212U, (uint64_t)2026968840050568U, (uint64_t)1396591153295755U,
-    (uint64_t)820416950170901U, (uint64_t)520060313205582U, (uint64_t)2016404325094901U,
-    (uint64_t)1584709677868520U, (uint64_t)272161374469956U, (uint64_t)1567188603996816U,
-    (uint64_t)1986160530078221U, (uint64_t)553930264324589U, (uint64_t)1058426729027503U,
-    (uint64_t)8762762886675U, (uint64_t)2216098143382988U, (uint64_t)1835145266889223U,
-    (uint64_t)1712936431558441U, (uint64_t)1017009937844974U, (uint64_t)585361667812740U,
-    (uint64_t)2114711541628181U, (uint64_t)2238729632971439U, (uint64_t)121257546253072U,
-    (uint64_t)847154149018345U, (uint64_t)211972965476684U, (uint64_t)287499084460129U,
-    (uint64_t)2098247259180197U, (uint64_t)839070411583329U, (uint64_t)339551619574372U,
-    (uint64_t)1432951287640743U, (uint64_t)526481249498942U, (uint64_t)931991661905195U,
-    (uint64_t)1884279965674487U, (uint64_t)200486405604411U, (uint64_t)364173020594788U,
-    (uint64_t)518034455936955U, (uint64_t)1085564703965501U, (uint64_t)16030410467927U,
-    (uint64_t)604865933167613U, (uint64_t)1695298441093964U, (uint64_t)498856548116159U,
-    (uint64_t)2193030062787034U, (uint64_t)1706339802964179U, (uint64_t)1721199073493888U,
-    (uint64_t)820740951039755U, (uint64_t)1216053436896834U, (uint64_t)23954895815139U,
-    (uint64_t)1662515208920491U, (uint64_t)1705443427511899U, (uint64_t)1957928899570365U,
-    (uint64_t)1189636258255725U, (uint64_t)1795695471103809U, (uint64_t)1691191297654118U,
-    (uint64_t)282402585374360U, (uint64_t)460405330264832U, (uint64_t)63765529445733U,
-    (uint64_t)469763447404473U, (uint64_t)733607089694996U, (uint64_t)685410420186959U,
-    (uint64_t)1096682630419738U, (uint64_t)1162548510542362U, (uint64_t)1020949526456676U,
-    (uint64_t)1211660396870573U, (uint64_t)613126398222696U, (uint64_t)1117829165843251U,
-    (uint64_t)742432540886650U, (uint64_t)1483755088010658U, (uint64_t)942392007134474U,
-    (uint64_t)1447834130944107U, (uint64_t)489368274863410U, (uint64_t)23192985544898U,
-    (uint64_t)648442406146160U, (uint64_t)785438843373876U, (uint64_t)249464684645238U,
-    (uint64_t)170494608205618U, (uint64_t)335112827260550U, (uint64_t)1462050123162735U,
-    (uint64_t)1084803668439016U, (uint64_t)853459233600325U, (uint64_t)215777728187495U,
-    (uint64_t)1965759433526974U, (uint64_t)1349482894446537U, (uint64_t)694163317612871U,
-    (uint64_t)860536766165036U, (uint64_t)1178788094084321U, (uint64_t)1652739626626996U,
-    (uint64_t)2115723946388185U, (uint64_t)1577204379094664U, (uint64_t)1083882859023240U,
-    (uint64_t)1768759143381635U, (uint64_t)1737180992507258U, (uint64_t)246054513922239U,
-    (uint64_t)577253134087234U, (uint64_t)356340280578042U, (uint64_t)1638917769925142U,
-    (uint64_t)223550348130103U, (uint64_t)470592666638765U, (uint64_t)22663573966996U,
-    (uint64_t)596552461152400U, (uint64_t)364143537069499U, (uint64_t)3942119457699U,
-    (uint64_t)107951982889287U, (uint64_t)1843471406713209U, (uint64_t)1625773041610986U,
-    (uint64_t)1466141092501702U, (uint64_t)1043024095021271U, (uint64_t)310429964047508U,
-    (uint64_t)98559121500372U, (uint64_t)152746933782868U, (uint64_t)259407205078261U,
-    (uint64_t)828123093322585U, (uint64_t)1576847274280091U, (uint64_t)1170871375757302U,
-    (uint64_t)1588856194642775U, (uint64_t)984767822341977U, (uint64_t)1141497997993760U,
-    (uint64_t)809325345150796U, (uint64_t)1879837728202511U, (uint64_t)201340910657893U,
-    (uint64_t)1079157558888483U, (uint64_t)1052373448588065U, (uint64_t)1732036202501778U,
-    (uint64_t)2105292670328445U, (uint64_t)679751387312402U, (uint64_t)1679682144926229U,
-    (uint64_t)1695823455818780U, (uint64_t)498852317075849U, (uint64_t)1786555067788433U,
-    (uint64_t)1670727545779425U, (uint64_t)117945875433544U, (uint64_t)407939139781844U,
-    (uint64_t)854632120023778U, (uint64_t)1413383148360437U, (uint64_t)286030901733673U,
-    (uint64_t)1207361858071196U, (uint64_t)461340408181417U, (uint64_t)1096919590360164U,
-    (uint64_t)1837594897475685U, (uint64_t)533755561544165U, (uint64_t)1638688042247712U,
-    (uint64_t)1431653684793005U, (uint64_t)1036458538873559U, (uint64_t)390822120341779U,
-    (uint64_t)1920929837111618U, (uint64_t)543426740024168U, (uint64_t)645751357799929U,
-    (uint64_t)2245025632994463U, (uint64_t)1550778638076452U, (uint64_t)223738153459949U,
-    (uint64_t)1337209385492033U, (uint64_t)1276967236456531U, (uint64_t)1463815821063071U,
-    (uint64_t)2070620870191473U, (uint64_t)1199170709413753U, (uint64_t)273230877394166U,
-    (uint64_t)1873264887608046U, (uint64_t)890877152910775U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1738742601995546ULL, 1146398526822698ULL, 2070867633025821ULL,
+    562264141797630ULL, 587772402128613ULL, 1801439850948184ULL, 1351079888211148ULL,
+    450359962737049ULL, 900719925474099ULL, 1801439850948198ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    1841354044333475ULL, 16398895984059ULL, 755974180946558ULL, 900171276175154ULL,
+    1821297809914039ULL, 1661154287933054ULL, 284530020860578ULL, 1390261174866914ULL,
+    1524110943907984ULL, 1045603498418422ULL, 928651508580478ULL, 1383326941296346ULL,
+    961937908925785ULL, 80455759693706ULL, 904734540352947ULL, 1507481815385608ULL,
+    2223447444246085ULL, 1083941587175919ULL, 2059929906842505ULL, 1581435440146976ULL,
+    782730187692425ULL, 9928394897574ULL, 1539449519985236ULL, 1923587931078510ULL,
+    552919286076056ULL, 376925408065760ULL, 447320488831784ULL, 1362918338468019ULL,
+    1470031896696846ULL, 2189796996539902ULL, 1337552949959847ULL, 1762287177775726ULL,
+    237994495816815ULL, 1277840395970544ULL, 543972849007241ULL, 1224692671618814ULL,
+    162359533289271ULL, 282240927125249ULL, 586909166382289ULL, 17726488197838ULL,
+    377014554985659ULL, 1433835303052512ULL, 702061469493692ULL, 1142253108318154ULL,
+    318297794307551ULL, 954362646308543ULL, 517363881452320ULL, 1868013482130416ULL,
+    262562472373260ULL, 902232853249919ULL, 2107343057055746ULL, 462368348619024ULL,
+    1893758677092974ULL, 2177729767846389ULL, 2168532543559143ULL, 443867094639821ULL,
+    730169342581022ULL, 1564589016879755ULL, 51218195700649ULL, 76684578423745ULL,
+    560266272480743ULL, 922517457707697ULL, 2066645939860874ULL, 1318277348414638ULL,
+    1576726809084003ULL, 1817337608563665ULL, 1874240939237666ULL, 754733726333910ULL,
+    97085310406474ULL, 751148364309235ULL, 1622159695715187ULL, 1444098819684916ULL,
+    130920805558089ULL, 1260449179085308ULL, 1860021740768461ULL, 110052860348509ULL,
+    193830891643810ULL, 164148413933881ULL, 180017794795332ULL, 1523506525254651ULL,
+    465981629225956ULL, 559733514964572ULL, 1279624874416974ULL, 2026642326892306ULL,
+    1425156829982409ULL, 2160936383793147ULL, 1061870624975247ULL, 2023497043036941ULL,
+    117942212883190ULL, 490339622800774ULL, 1729931303146295ULL, 422305932971074ULL,
+    529103152793096ULL, 1211973233775992ULL, 721364955929681ULL, 1497674430438813ULL,
+    342545521275073ULL, 2102107575279372ULL, 2108462244669966ULL, 1382582406064082ULL,
+    2206396818383323ULL, 2109093268641147ULL, 10809845110983ULL, 1605176920880099ULL,
+    744640650753946ULL, 1712758897518129ULL, 373410811281809ULL, 648838265800209ULL,
+    813058095530999ULL, 513987632620169ULL, 465516160703329ULL, 2136322186126330ULL,
+    1979645899422932ULL, 1197131006470786ULL, 1467836664863979ULL, 1340751381374628ULL,
+    1810066212667962ULL, 1009933588225499ULL, 1106129188080873ULL, 1388980405213901ULL,
+    533719246598044ULL, 1169435803073277ULL, 198920999285821ULL, 487492330629854ULL,
+    1807093008537778ULL, 1540899012923865ULL, 2075080271659867ULL, 1527990806921523ULL,
+    1323728742908002ULL, 1568595959608205ULL, 1388032187497212ULL, 2026968840050568ULL,
+    1396591153295755ULL, 820416950170901ULL, 520060313205582ULL, 2016404325094901ULL,
+    1584709677868520ULL, 272161374469956ULL, 1567188603996816ULL, 1986160530078221ULL,
+    553930264324589ULL, 1058426729027503ULL, 8762762886675ULL, 2216098143382988ULL,
+    1835145266889223ULL, 1712936431558441ULL, 1017009937844974ULL, 585361667812740ULL,
+    2114711541628181ULL, 2238729632971439ULL, 121257546253072ULL, 847154149018345ULL,
+    211972965476684ULL, 287499084460129ULL, 2098247259180197ULL, 839070411583329ULL,
+    339551619574372ULL, 1432951287640743ULL, 526481249498942ULL, 931991661905195ULL,
+    1884279965674487ULL, 200486405604411ULL, 364173020594788ULL, 518034455936955ULL,
+    1085564703965501ULL, 16030410467927ULL, 604865933167613ULL, 1695298441093964ULL,
+    498856548116159ULL, 2193030062787034ULL, 1706339802964179ULL, 1721199073493888ULL,
+    820740951039755ULL, 1216053436896834ULL, 23954895815139ULL, 1662515208920491ULL,
+    1705443427511899ULL, 1957928899570365ULL, 1189636258255725ULL, 1795695471103809ULL,
+    1691191297654118ULL, 282402585374360ULL, 460405330264832ULL, 63765529445733ULL,
+    469763447404473ULL, 733607089694996ULL, 685410420186959ULL, 1096682630419738ULL,
+    1162548510542362ULL, 1020949526456676ULL, 1211660396870573ULL, 613126398222696ULL,
+    1117829165843251ULL, 742432540886650ULL, 1483755088010658ULL, 942392007134474ULL,
+    1447834130944107ULL, 489368274863410ULL, 23192985544898ULL, 648442406146160ULL,
+    785438843373876ULL, 249464684645238ULL, 170494608205618ULL, 335112827260550ULL,
+    1462050123162735ULL, 1084803668439016ULL, 853459233600325ULL, 215777728187495ULL,
+    1965759433526974ULL, 1349482894446537ULL, 694163317612871ULL, 860536766165036ULL,
+    1178788094084321ULL, 1652739626626996ULL, 2115723946388185ULL, 1577204379094664ULL,
+    1083882859023240ULL, 1768759143381635ULL, 1737180992507258ULL, 246054513922239ULL,
+    577253134087234ULL, 356340280578042ULL, 1638917769925142ULL, 223550348130103ULL,
+    470592666638765ULL, 22663573966996ULL, 596552461152400ULL, 364143537069499ULL, 3942119457699ULL,
+    107951982889287ULL, 1843471406713209ULL, 1625773041610986ULL, 1466141092501702ULL,
+    1043024095021271ULL, 310429964047508ULL, 98559121500372ULL, 152746933782868ULL,
+    259407205078261ULL, 828123093322585ULL, 1576847274280091ULL, 1170871375757302ULL,
+    1588856194642775ULL, 984767822341977ULL, 1141497997993760ULL, 809325345150796ULL,
+    1879837728202511ULL, 201340910657893ULL, 1079157558888483ULL, 1052373448588065ULL,
+    1732036202501778ULL, 2105292670328445ULL, 679751387312402ULL, 1679682144926229ULL,
+    1695823455818780ULL, 498852317075849ULL, 1786555067788433ULL, 1670727545779425ULL,
+    117945875433544ULL, 407939139781844ULL, 854632120023778ULL, 1413383148360437ULL,
+    286030901733673ULL, 1207361858071196ULL, 461340408181417ULL, 1096919590360164ULL,
+    1837594897475685ULL, 533755561544165ULL, 1638688042247712ULL, 1431653684793005ULL,
+    1036458538873559ULL, 390822120341779ULL, 1920929837111618ULL, 543426740024168ULL,
+    645751357799929ULL, 2245025632994463ULL, 1550778638076452ULL, 223738153459949ULL,
+    1337209385492033ULL, 1276967236456531ULL, 1463815821063071ULL, 2070620870191473ULL,
+    1199170709413753ULL, 273230877394166ULL, 1873264887608046ULL, 890877152910775ULL
   };
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_g_pow2_64_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)13559344787725U, (uint64_t)2051621493703448U,
-    (uint64_t)1947659315640708U, (uint64_t)626856790370168U, (uint64_t)1592804284034836U,
-    (uint64_t)1781728767459187U, (uint64_t)278818420518009U, (uint64_t)2038030359908351U,
-    (uint64_t)910625973862690U, (uint64_t)471887343142239U, (uint64_t)1298543306606048U,
-    (uint64_t)794147365642417U, (uint64_t)129968992326749U, (uint64_t)523140861678572U,
-    (uint64_t)1166419653909231U, (uint64_t)2009637196928390U, (uint64_t)1288020222395193U,
-    (uint64_t)1007046974985829U, (uint64_t)208981102651386U, (uint64_t)2074009315253380U,
-    (uint64_t)1564056062071967U, (uint64_t)276822668750618U, (uint64_t)206621292512572U,
-    (uint64_t)470304361809269U, (uint64_t)895215438398493U, (uint64_t)1527859053868686U,
-    (uint64_t)1624967223409369U, (uint64_t)811821865979736U, (uint64_t)350450534838340U,
-    (uint64_t)219143807921807U, (uint64_t)507994540371254U, (uint64_t)986513794574720U,
-    (uint64_t)1142661369967121U, (uint64_t)621278293399257U, (uint64_t)556189161519781U,
-    (uint64_t)351964007865066U, (uint64_t)2011573453777822U, (uint64_t)1367125527151537U,
-    (uint64_t)1691316722438196U, (uint64_t)731328817345164U, (uint64_t)1284781192709232U,
-    (uint64_t)478439299539269U, (uint64_t)204842178076429U, (uint64_t)2085125369913651U,
-    (uint64_t)1980773492792985U, (uint64_t)1480264409524940U, (uint64_t)688389585376233U,
-    (uint64_t)612962643526972U, (uint64_t)165595382536676U, (uint64_t)1850300069212263U,
-    (uint64_t)1176357203491551U, (uint64_t)1880164984292321U, (uint64_t)10786153104736U,
-    (uint64_t)1242293560510203U, (uint64_t)1358399951884084U, (uint64_t)1901358796610357U,
-    (uint64_t)1385092558795806U, (uint64_t)1734893785311348U, (uint64_t)2046201851951191U,
-    (uint64_t)1233811309557352U, (uint64_t)1531160168656129U, (uint64_t)1543287181303358U,
-    (uint64_t)516121446374119U, (uint64_t)723422668089935U, (uint64_t)1228176774959679U,
-    (uint64_t)1598014722726267U, (uint64_t)1630810326658412U, (uint64_t)1343833067463760U,
-    (uint64_t)1024397964362099U, (uint64_t)1157142161346781U, (uint64_t)56422174971792U,
-    (uint64_t)544901687297092U, (uint64_t)1291559028869009U, (uint64_t)1336918672345120U,
-    (uint64_t)1390874603281353U, (uint64_t)1127199512010904U, (uint64_t)992644979940964U,
-    (uint64_t)1035213479783573U, (uint64_t)36043651196100U, (uint64_t)1220961519321221U,
-    (uint64_t)1348190007756977U, (uint64_t)579420200329088U, (uint64_t)1703819961008985U,
-    (uint64_t)1993919213460047U, (uint64_t)2225080008232251U, (uint64_t)392785893702372U,
-    (uint64_t)464312521482632U, (uint64_t)1224525362116057U, (uint64_t)810394248933036U,
-    (uint64_t)932513521649107U, (uint64_t)592314953488703U, (uint64_t)586334603791548U,
-    (uint64_t)1310888126096549U, (uint64_t)650842674074281U, (uint64_t)1596447001791059U,
-    (uint64_t)2086767406328284U, (uint64_t)1866377645879940U, (uint64_t)1721604362642743U,
-    (uint64_t)738502322566890U, (uint64_t)1851901097729689U, (uint64_t)1158347571686914U,
-    (uint64_t)2023626733470827U, (uint64_t)329625404653699U, (uint64_t)563555875598551U,
-    (uint64_t)516554588079177U, (uint64_t)1134688306104598U, (uint64_t)186301198420809U,
-    (uint64_t)1339952213563300U, (uint64_t)643605614625891U, (uint64_t)1947505332718043U,
-    (uint64_t)1722071694852824U, (uint64_t)601679570440694U, (uint64_t)1821275721236351U,
-    (uint64_t)1808307842870389U, (uint64_t)1654165204015635U, (uint64_t)1457334100715245U,
-    (uint64_t)217784948678349U, (uint64_t)1820622417674817U, (uint64_t)1946121178444661U,
-    (uint64_t)597980757799332U, (uint64_t)1745271227710764U, (uint64_t)2010952890941980U,
-    (uint64_t)339811849696648U, (uint64_t)1066120666993872U, (uint64_t)261276166508990U,
-    (uint64_t)323098645774553U, (uint64_t)207454744271283U, (uint64_t)941448672977675U,
-    (uint64_t)71890920544375U, (uint64_t)840849789313357U, (uint64_t)1223996070717926U,
-    (uint64_t)196832550853408U, (uint64_t)115986818309231U, (uint64_t)1586171527267675U,
-    (uint64_t)1666169080973450U, (uint64_t)1456454731176365U, (uint64_t)44467854369003U,
-    (uint64_t)2149656190691480U, (uint64_t)283446383597589U, (uint64_t)2040542647729974U,
-    (uint64_t)305705593840224U, (uint64_t)475315822269791U, (uint64_t)648133452550632U,
-    (uint64_t)169218658835720U, (uint64_t)24960052338251U, (uint64_t)938907951346766U,
-    (uint64_t)425970950490510U, (uint64_t)1037622011013183U, (uint64_t)1026882082708180U,
-    (uint64_t)1635699409504916U, (uint64_t)1644776942870488U, (uint64_t)2151820331175914U,
-    (uint64_t)824120674069819U, (uint64_t)835744976610113U, (uint64_t)1991271032313190U,
-    (uint64_t)96507354724855U, (uint64_t)400645405133260U, (uint64_t)343728076650825U,
-    (uint64_t)1151585441385566U, (uint64_t)1403339955333520U, (uint64_t)230186314139774U,
-    (uint64_t)1736248861506714U, (uint64_t)1010804378904572U, (uint64_t)1394932289845636U,
-    (uint64_t)1901351256960852U, (uint64_t)2187471430089807U, (uint64_t)1003853262342670U,
-    (uint64_t)1327743396767461U, (uint64_t)1465160415991740U, (uint64_t)366625359144534U,
-    (uint64_t)1534791405247604U, (uint64_t)1790905930250187U, (uint64_t)1255484115292738U,
-    (uint64_t)2223291365520443U, (uint64_t)210967717407408U, (uint64_t)26722916813442U,
-    (uint64_t)1919574361907910U, (uint64_t)468825088280256U, (uint64_t)2230011775946070U,
-    (uint64_t)1628365642214479U, (uint64_t)568871869234932U, (uint64_t)1066987968780488U,
-    (uint64_t)1692242903745558U, (uint64_t)1678903997328589U, (uint64_t)214262165888021U,
-    (uint64_t)1929686748607204U, (uint64_t)1790138967989670U, (uint64_t)1790261616022076U,
-    (uint64_t)1559824537553112U, (uint64_t)1230364591311358U, (uint64_t)147531939886346U,
-    (uint64_t)1528207085815487U, (uint64_t)477957922927292U, (uint64_t)285670243881618U,
-    (uint64_t)264430080123332U, (uint64_t)1163108160028611U, (uint64_t)373201522147371U,
-    (uint64_t)34903775270979U, (uint64_t)1750870048600662U, (uint64_t)1319328308741084U,
-    (uint64_t)1547548634278984U, (uint64_t)1691259592202927U, (uint64_t)2247758037259814U,
-    (uint64_t)329611399953677U, (uint64_t)1385555496268877U, (uint64_t)2242438354031066U,
-    (uint64_t)1329523854843632U, (uint64_t)399895373846055U, (uint64_t)678005703193452U,
-    (uint64_t)1496357700997771U, (uint64_t)71909969781942U, (uint64_t)1515391418612349U,
-    (uint64_t)470110837888178U, (uint64_t)1981307309417466U, (uint64_t)1259888737412276U,
-    (uint64_t)669991710228712U, (uint64_t)1048546834514303U, (uint64_t)1678323291295512U,
-    (uint64_t)2172033978088071U, (uint64_t)1529278455500556U, (uint64_t)901984601941894U,
-    (uint64_t)780867622403807U, (uint64_t)550105677282793U, (uint64_t)975860231176136U,
-    (uint64_t)525188281689178U, (uint64_t)49966114807992U, (uint64_t)1776449263836645U,
-    (uint64_t)267851776380338U, (uint64_t)2225969494054620U, (uint64_t)2016794225789822U,
-    (uint64_t)1186108678266608U, (uint64_t)1023083271408882U, (uint64_t)1119289418565906U,
-    (uint64_t)1248185897348801U, (uint64_t)1846081539082697U, (uint64_t)23756429626075U,
-    (uint64_t)1441999021105403U, (uint64_t)724497586552825U, (uint64_t)1287761623605379U,
-    (uint64_t)685303359654224U, (uint64_t)2217156930690570U, (uint64_t)163769288918347U,
-    (uint64_t)1098423278284094U, (uint64_t)1391470723006008U, (uint64_t)570700152353516U,
-    (uint64_t)744804507262556U, (uint64_t)2200464788609495U, (uint64_t)624141899161992U,
-    (uint64_t)2249570166275684U, (uint64_t)378706441983561U, (uint64_t)122486379999375U,
-    (uint64_t)430741162798924U, (uint64_t)113847463452574U, (uint64_t)266250457840685U,
-    (uint64_t)2120743625072743U, (uint64_t)222186221043927U, (uint64_t)1964290018305582U,
-    (uint64_t)1435278008132477U, (uint64_t)1670867456663734U, (uint64_t)2009989552599079U,
-    (uint64_t)1348024113448744U, (uint64_t)1158423886300455U, (uint64_t)1356467152691569U,
-    (uint64_t)306943042363674U, (uint64_t)926879628664255U, (uint64_t)1349295689598324U,
-    (uint64_t)725558330071205U, (uint64_t)536569987519948U, (uint64_t)116436990335366U,
-    (uint64_t)1551888573800376U, (uint64_t)2044698345945451U, (uint64_t)104279940291311U,
-    (uint64_t)251526570943220U, (uint64_t)754735828122925U, (uint64_t)33448073576361U,
-    (uint64_t)994605876754543U, (uint64_t)546007584022006U, (uint64_t)2217332798409487U,
-    (uint64_t)706477052561591U, (uint64_t)131174619428653U, (uint64_t)2148698284087243U,
-    (uint64_t)239290486205186U, (uint64_t)2161325796952184U, (uint64_t)1713452845607994U,
-    (uint64_t)1297861562938913U, (uint64_t)1779539876828514U, (uint64_t)1926559018603871U,
-    (uint64_t)296485747893968U, (uint64_t)1859208206640686U, (uint64_t)538513979002718U,
-    (uint64_t)103998826506137U, (uint64_t)2025375396538469U, (uint64_t)1370680785701206U,
-    (uint64_t)1698557311253840U, (uint64_t)1411096399076595U, (uint64_t)2132580530813677U,
-    (uint64_t)2071564345845035U, (uint64_t)498581428556735U, (uint64_t)1136010486691371U,
-    (uint64_t)1927619356993146U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 13559344787725ULL, 2051621493703448ULL, 1947659315640708ULL,
+    626856790370168ULL, 1592804284034836ULL, 1781728767459187ULL, 278818420518009ULL,
+    2038030359908351ULL, 910625973862690ULL, 471887343142239ULL, 1298543306606048ULL,
+    794147365642417ULL, 129968992326749ULL, 523140861678572ULL, 1166419653909231ULL,
+    2009637196928390ULL, 1288020222395193ULL, 1007046974985829ULL, 208981102651386ULL,
+    2074009315253380ULL, 1564056062071967ULL, 276822668750618ULL, 206621292512572ULL,
+    470304361809269ULL, 895215438398493ULL, 1527859053868686ULL, 1624967223409369ULL,
+    811821865979736ULL, 350450534838340ULL, 219143807921807ULL, 507994540371254ULL,
+    986513794574720ULL, 1142661369967121ULL, 621278293399257ULL, 556189161519781ULL,
+    351964007865066ULL, 2011573453777822ULL, 1367125527151537ULL, 1691316722438196ULL,
+    731328817345164ULL, 1284781192709232ULL, 478439299539269ULL, 204842178076429ULL,
+    2085125369913651ULL, 1980773492792985ULL, 1480264409524940ULL, 688389585376233ULL,
+    612962643526972ULL, 165595382536676ULL, 1850300069212263ULL, 1176357203491551ULL,
+    1880164984292321ULL, 10786153104736ULL, 1242293560510203ULL, 1358399951884084ULL,
+    1901358796610357ULL, 1385092558795806ULL, 1734893785311348ULL, 2046201851951191ULL,
+    1233811309557352ULL, 1531160168656129ULL, 1543287181303358ULL, 516121446374119ULL,
+    723422668089935ULL, 1228176774959679ULL, 1598014722726267ULL, 1630810326658412ULL,
+    1343833067463760ULL, 1024397964362099ULL, 1157142161346781ULL, 56422174971792ULL,
+    544901687297092ULL, 1291559028869009ULL, 1336918672345120ULL, 1390874603281353ULL,
+    1127199512010904ULL, 992644979940964ULL, 1035213479783573ULL, 36043651196100ULL,
+    1220961519321221ULL, 1348190007756977ULL, 579420200329088ULL, 1703819961008985ULL,
+    1993919213460047ULL, 2225080008232251ULL, 392785893702372ULL, 464312521482632ULL,
+    1224525362116057ULL, 810394248933036ULL, 932513521649107ULL, 592314953488703ULL,
+    586334603791548ULL, 1310888126096549ULL, 650842674074281ULL, 1596447001791059ULL,
+    2086767406328284ULL, 1866377645879940ULL, 1721604362642743ULL, 738502322566890ULL,
+    1851901097729689ULL, 1158347571686914ULL, 2023626733470827ULL, 329625404653699ULL,
+    563555875598551ULL, 516554588079177ULL, 1134688306104598ULL, 186301198420809ULL,
+    1339952213563300ULL, 643605614625891ULL, 1947505332718043ULL, 1722071694852824ULL,
+    601679570440694ULL, 1821275721236351ULL, 1808307842870389ULL, 1654165204015635ULL,
+    1457334100715245ULL, 217784948678349ULL, 1820622417674817ULL, 1946121178444661ULL,
+    597980757799332ULL, 1745271227710764ULL, 2010952890941980ULL, 339811849696648ULL,
+    1066120666993872ULL, 261276166508990ULL, 323098645774553ULL, 207454744271283ULL,
+    941448672977675ULL, 71890920544375ULL, 840849789313357ULL, 1223996070717926ULL,
+    196832550853408ULL, 115986818309231ULL, 1586171527267675ULL, 1666169080973450ULL,
+    1456454731176365ULL, 44467854369003ULL, 2149656190691480ULL, 283446383597589ULL,
+    2040542647729974ULL, 305705593840224ULL, 475315822269791ULL, 648133452550632ULL,
+    169218658835720ULL, 24960052338251ULL, 938907951346766ULL, 425970950490510ULL,
+    1037622011013183ULL, 1026882082708180ULL, 1635699409504916ULL, 1644776942870488ULL,
+    2151820331175914ULL, 824120674069819ULL, 835744976610113ULL, 1991271032313190ULL,
+    96507354724855ULL, 400645405133260ULL, 343728076650825ULL, 1151585441385566ULL,
+    1403339955333520ULL, 230186314139774ULL, 1736248861506714ULL, 1010804378904572ULL,
+    1394932289845636ULL, 1901351256960852ULL, 2187471430089807ULL, 1003853262342670ULL,
+    1327743396767461ULL, 1465160415991740ULL, 366625359144534ULL, 1534791405247604ULL,
+    1790905930250187ULL, 1255484115292738ULL, 2223291365520443ULL, 210967717407408ULL,
+    26722916813442ULL, 1919574361907910ULL, 468825088280256ULL, 2230011775946070ULL,
+    1628365642214479ULL, 568871869234932ULL, 1066987968780488ULL, 1692242903745558ULL,
+    1678903997328589ULL, 214262165888021ULL, 1929686748607204ULL, 1790138967989670ULL,
+    1790261616022076ULL, 1559824537553112ULL, 1230364591311358ULL, 147531939886346ULL,
+    1528207085815487ULL, 477957922927292ULL, 285670243881618ULL, 264430080123332ULL,
+    1163108160028611ULL, 373201522147371ULL, 34903775270979ULL, 1750870048600662ULL,
+    1319328308741084ULL, 1547548634278984ULL, 1691259592202927ULL, 2247758037259814ULL,
+    329611399953677ULL, 1385555496268877ULL, 2242438354031066ULL, 1329523854843632ULL,
+    399895373846055ULL, 678005703193452ULL, 1496357700997771ULL, 71909969781942ULL,
+    1515391418612349ULL, 470110837888178ULL, 1981307309417466ULL, 1259888737412276ULL,
+    669991710228712ULL, 1048546834514303ULL, 1678323291295512ULL, 2172033978088071ULL,
+    1529278455500556ULL, 901984601941894ULL, 780867622403807ULL, 550105677282793ULL,
+    975860231176136ULL, 525188281689178ULL, 49966114807992ULL, 1776449263836645ULL,
+    267851776380338ULL, 2225969494054620ULL, 2016794225789822ULL, 1186108678266608ULL,
+    1023083271408882ULL, 1119289418565906ULL, 1248185897348801ULL, 1846081539082697ULL,
+    23756429626075ULL, 1441999021105403ULL, 724497586552825ULL, 1287761623605379ULL,
+    685303359654224ULL, 2217156930690570ULL, 163769288918347ULL, 1098423278284094ULL,
+    1391470723006008ULL, 570700152353516ULL, 744804507262556ULL, 2200464788609495ULL,
+    624141899161992ULL, 2249570166275684ULL, 378706441983561ULL, 122486379999375ULL,
+    430741162798924ULL, 113847463452574ULL, 266250457840685ULL, 2120743625072743ULL,
+    222186221043927ULL, 1964290018305582ULL, 1435278008132477ULL, 1670867456663734ULL,
+    2009989552599079ULL, 1348024113448744ULL, 1158423886300455ULL, 1356467152691569ULL,
+    306943042363674ULL, 926879628664255ULL, 1349295689598324ULL, 725558330071205ULL,
+    536569987519948ULL, 116436990335366ULL, 1551888573800376ULL, 2044698345945451ULL,
+    104279940291311ULL, 251526570943220ULL, 754735828122925ULL, 33448073576361ULL,
+    994605876754543ULL, 546007584022006ULL, 2217332798409487ULL, 706477052561591ULL,
+    131174619428653ULL, 2148698284087243ULL, 239290486205186ULL, 2161325796952184ULL,
+    1713452845607994ULL, 1297861562938913ULL, 1779539876828514ULL, 1926559018603871ULL,
+    296485747893968ULL, 1859208206640686ULL, 538513979002718ULL, 103998826506137ULL,
+    2025375396538469ULL, 1370680785701206ULL, 1698557311253840ULL, 1411096399076595ULL,
+    2132580530813677ULL, 2071564345845035ULL, 498581428556735ULL, 1136010486691371ULL,
+    1927619356993146ULL
   };
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_g_pow2_128_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)557549315715710U, (uint64_t)196756086293855U,
-    (uint64_t)846062225082495U, (uint64_t)1865068224838092U, (uint64_t)991112090754908U,
-    (uint64_t)522916421512828U, (uint64_t)2098523346722375U, (uint64_t)1135633221747012U,
-    (uint64_t)858420432114866U, (uint64_t)186358544306082U, (uint64_t)1044420411868480U,
-    (uint64_t)2080052304349321U, (uint64_t)557301814716724U, (uint64_t)1305130257814057U,
-    (uint64_t)2126012765451197U, (uint64_t)1441004402875101U, (uint64_t)353948968859203U,
-    (uint64_t)470765987164835U, (uint64_t)1507675957683570U, (uint64_t)1086650358745097U,
-    (uint64_t)1911913434398388U, (uint64_t)66086091117182U, (uint64_t)1137511952425971U,
-    (uint64_t)36958263512141U, (uint64_t)2193310025325256U, (uint64_t)1085191426269045U,
-    (uint64_t)1232148267909446U, (uint64_t)1449894406170117U, (uint64_t)1241416717139557U,
-    (uint64_t)1940876999212868U, (uint64_t)829758415918121U, (uint64_t)309608450373449U,
-    (uint64_t)2228398547683851U, (uint64_t)1580623271960188U, (uint64_t)1675601502456740U,
-    (uint64_t)1360363115493548U, (uint64_t)1098397313096815U, (uint64_t)1809255384359797U,
-    (uint64_t)1458261916834384U, (uint64_t)210682545649705U, (uint64_t)1606836641068115U,
-    (uint64_t)1230478270405318U, (uint64_t)1843192771547802U, (uint64_t)1794596343564051U,
-    (uint64_t)229060710252162U, (uint64_t)2169742775467181U, (uint64_t)701467067318072U,
-    (uint64_t)696018499035555U, (uint64_t)521051885339807U, (uint64_t)158329567901874U,
-    (uint64_t)740426481832143U, (uint64_t)1369811177301441U, (uint64_t)503351589084015U,
-    (uint64_t)1781114827942261U, (uint64_t)1650493549693035U, (uint64_t)2174562418345156U,
-    (uint64_t)456517194809244U, (uint64_t)2052761522121179U, (uint64_t)2233342271123682U,
-    (uint64_t)1445872925177435U, (uint64_t)1131882576902813U, (uint64_t)220765848055241U,
-    (uint64_t)1280259961403769U, (uint64_t)1581497080160712U, (uint64_t)1477441080108824U,
-    (uint64_t)218428165202767U, (uint64_t)1970598141278907U, (uint64_t)643366736173069U,
-    (uint64_t)2167909426804014U, (uint64_t)834993711408259U, (uint64_t)1922437166463212U,
-    (uint64_t)1900036281472252U, (uint64_t)513794844386304U, (uint64_t)1297904164900114U,
-    (uint64_t)1147626295373268U, (uint64_t)1910101606251299U, (uint64_t)182933838633381U,
-    (uint64_t)806229530787362U, (uint64_t)155511666433200U, (uint64_t)290522463375462U,
-    (uint64_t)534373523491751U, (uint64_t)1302938814480515U, (uint64_t)1664979184120445U,
-    (uint64_t)304235649499423U, (uint64_t)339284524318609U, (uint64_t)1881717946973483U,
-    (uint64_t)1670802286833842U, (uint64_t)2223637120675737U, (uint64_t)135818919485814U,
-    (uint64_t)1144856572842792U, (uint64_t)2234981613434386U, (uint64_t)963917024969826U,
-    (uint64_t)402275378284993U, (uint64_t)141532417412170U, (uint64_t)921537468739387U,
-    (uint64_t)963905069722607U, (uint64_t)1405442890733358U, (uint64_t)1567763927164655U,
-    (uint64_t)1664776329195930U, (uint64_t)2095924165508507U, (uint64_t)994243110271379U,
-    (uint64_t)1243925610609353U, (uint64_t)1029845815569727U, (uint64_t)1001968867985629U,
-    (uint64_t)170368934002484U, (uint64_t)1100906131583801U, (uint64_t)1825190326449569U,
-    (uint64_t)1462285121182096U, (uint64_t)1545240767016377U, (uint64_t)797859025652273U,
-    (uint64_t)1062758326657530U, (uint64_t)1125600735118266U, (uint64_t)739325756774527U,
-    (uint64_t)1420144485966996U, (uint64_t)1915492743426702U, (uint64_t)752968196344993U,
-    (uint64_t)882156396938351U, (uint64_t)1909097048763227U, (uint64_t)849058590685611U,
-    (uint64_t)840754951388500U, (uint64_t)1832926948808323U, (uint64_t)2023317100075297U,
-    (uint64_t)322382745442827U, (uint64_t)1569741341737601U, (uint64_t)1678986113194987U,
-    (uint64_t)757598994581938U, (uint64_t)29678659580705U, (uint64_t)1239680935977986U,
-    (uint64_t)1509239427168474U, (uint64_t)1055981929287006U, (uint64_t)1894085471158693U,
-    (uint64_t)916486225488490U, (uint64_t)642168890366120U, (uint64_t)300453362620010U,
-    (uint64_t)1858797242721481U, (uint64_t)2077989823177130U, (uint64_t)510228455273334U,
-    (uint64_t)1473284798689270U, (uint64_t)5173934574301U, (uint64_t)765285232030050U,
-    (uint64_t)1007154707631065U, (uint64_t)1862128712885972U, (uint64_t)168873464821340U,
-    (uint64_t)1967853269759318U, (uint64_t)1489896018263031U, (uint64_t)592451806166369U,
-    (uint64_t)1242298565603883U, (uint64_t)1838918921339058U, (uint64_t)697532763910695U,
-    (uint64_t)294335466239059U, (uint64_t)135687058387449U, (uint64_t)2133734403874176U,
-    (uint64_t)2121911143127699U, (uint64_t)20222476737364U, (uint64_t)1200824626476747U,
-    (uint64_t)1397731736540791U, (uint64_t)702378430231418U, (uint64_t)59059527640068U,
-    (uint64_t)460992547183981U, (uint64_t)1016125857842765U, (uint64_t)1273530839608957U,
-    (uint64_t)96724128829301U, (uint64_t)1313433042425233U, (uint64_t)3543822857227U,
-    (uint64_t)761975685357118U, (uint64_t)110417360745248U, (uint64_t)1079634164577663U,
-    (uint64_t)2044574510020457U, (uint64_t)338709058603120U, (uint64_t)94541336042799U,
-    (uint64_t)127963233585039U, (uint64_t)94427896272258U, (uint64_t)1143501979342182U,
-    (uint64_t)1217958006212230U, (uint64_t)2153887831492134U, (uint64_t)1519219513255575U,
-    (uint64_t)251793195454181U, (uint64_t)392517349345200U, (uint64_t)1507033011868881U,
-    (uint64_t)2208494254670752U, (uint64_t)1364389582694359U, (uint64_t)2214069430728063U,
-    (uint64_t)1272814257105752U, (uint64_t)741450148906352U, (uint64_t)1105776675555685U,
-    (uint64_t)824447222014984U, (uint64_t)528745219306376U, (uint64_t)589427609121575U,
-    (uint64_t)1501786838809155U, (uint64_t)379067373073147U, (uint64_t)184909476589356U,
-    (uint64_t)1346887560616185U, (uint64_t)1932023742314082U, (uint64_t)1633302311869264U,
-    (uint64_t)1685314821133069U, (uint64_t)1836610282047884U, (uint64_t)1595571594397150U,
-    (uint64_t)615441688872198U, (uint64_t)1926435616702564U, (uint64_t)235632180396480U,
-    (uint64_t)1051918343571810U, (uint64_t)2150570051687050U, (uint64_t)879198845408738U,
-    (uint64_t)1443966275205464U, (uint64_t)481362545245088U, (uint64_t)512807443532642U,
-    (uint64_t)641147578283480U, (uint64_t)1594276116945596U, (uint64_t)1844812743300602U,
-    (uint64_t)2044559316019485U, (uint64_t)202620777969020U, (uint64_t)852992984136302U,
-    (uint64_t)1500869642692910U, (uint64_t)1085216217052457U, (uint64_t)1736294372259758U,
-    (uint64_t)2009666354486552U, (uint64_t)1262389020715248U, (uint64_t)1166527705256867U,
-    (uint64_t)1409917450806036U, (uint64_t)1705819160057637U, (uint64_t)1116901782584378U,
-    (uint64_t)1278460472285473U, (uint64_t)257879811360157U, (uint64_t)40314007176886U,
-    (uint64_t)701309846749639U, (uint64_t)1380457676672777U, (uint64_t)631519782380272U,
-    (uint64_t)1196339573466793U, (uint64_t)955537708940017U, (uint64_t)532725633381530U,
-    (uint64_t)641190593731833U, (uint64_t)7214357153807U, (uint64_t)481922072107983U,
-    (uint64_t)1634886189207352U, (uint64_t)1247659758261633U, (uint64_t)1655809614786430U,
-    (uint64_t)43105797900223U, (uint64_t)76205809912607U, (uint64_t)1936575107455823U,
-    (uint64_t)1107927314642236U, (uint64_t)2199986333469333U, (uint64_t)802974829322510U,
-    (uint64_t)718173128143482U, (uint64_t)539385184235615U, (uint64_t)2075693785611221U,
-    (uint64_t)953281147333690U, (uint64_t)1623571637172587U, (uint64_t)655274535022250U,
-    (uint64_t)1568078078819021U, (uint64_t)101142125049712U, (uint64_t)1488441673350881U,
-    (uint64_t)1457969561944515U, (uint64_t)1492622544287712U, (uint64_t)2041460689280803U,
-    (uint64_t)1961848091392887U, (uint64_t)461003520846938U, (uint64_t)934728060399807U,
-    (uint64_t)117723291519705U, (uint64_t)1027773762863526U, (uint64_t)56765304991567U,
-    (uint64_t)2184028379550479U, (uint64_t)1768767711894030U, (uint64_t)1304432068983172U,
-    (uint64_t)498080974452325U, (uint64_t)2134905654858163U, (uint64_t)1446137427202647U,
-    (uint64_t)551613831549590U, (uint64_t)680288767054205U, (uint64_t)1278113339140386U,
-    (uint64_t)378149431842614U, (uint64_t)80520494426960U, (uint64_t)2080985256348782U,
-    (uint64_t)673432591799820U, (uint64_t)739189463724560U, (uint64_t)1847191452197509U,
-    (uint64_t)527737312871602U, (uint64_t)477609358840073U, (uint64_t)1891633072677946U,
-    (uint64_t)1841456828278466U, (uint64_t)2242502936489002U, (uint64_t)524791829362709U,
-    (uint64_t)276648168514036U, (uint64_t)991706903257619U, (uint64_t)512580228297906U,
-    (uint64_t)1216855104975946U, (uint64_t)67030930303149U, (uint64_t)769593945208213U,
-    (uint64_t)2048873385103577U, (uint64_t)455635274123107U, (uint64_t)2077404927176696U,
-    (uint64_t)1803539634652306U, (uint64_t)1837579953843417U, (uint64_t)1564240068662828U,
-    (uint64_t)1964310918970435U, (uint64_t)832822906252492U, (uint64_t)1516044634195010U,
-    (uint64_t)770571447506889U, (uint64_t)602215152486818U, (uint64_t)1760828333136947U,
-    (uint64_t)730156776030376U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 557549315715710ULL, 196756086293855ULL, 846062225082495ULL,
+    1865068224838092ULL, 991112090754908ULL, 522916421512828ULL, 2098523346722375ULL,
+    1135633221747012ULL, 858420432114866ULL, 186358544306082ULL, 1044420411868480ULL,
+    2080052304349321ULL, 557301814716724ULL, 1305130257814057ULL, 2126012765451197ULL,
+    1441004402875101ULL, 353948968859203ULL, 470765987164835ULL, 1507675957683570ULL,
+    1086650358745097ULL, 1911913434398388ULL, 66086091117182ULL, 1137511952425971ULL,
+    36958263512141ULL, 2193310025325256ULL, 1085191426269045ULL, 1232148267909446ULL,
+    1449894406170117ULL, 1241416717139557ULL, 1940876999212868ULL, 829758415918121ULL,
+    309608450373449ULL, 2228398547683851ULL, 1580623271960188ULL, 1675601502456740ULL,
+    1360363115493548ULL, 1098397313096815ULL, 1809255384359797ULL, 1458261916834384ULL,
+    210682545649705ULL, 1606836641068115ULL, 1230478270405318ULL, 1843192771547802ULL,
+    1794596343564051ULL, 229060710252162ULL, 2169742775467181ULL, 701467067318072ULL,
+    696018499035555ULL, 521051885339807ULL, 158329567901874ULL, 740426481832143ULL,
+    1369811177301441ULL, 503351589084015ULL, 1781114827942261ULL, 1650493549693035ULL,
+    2174562418345156ULL, 456517194809244ULL, 2052761522121179ULL, 2233342271123682ULL,
+    1445872925177435ULL, 1131882576902813ULL, 220765848055241ULL, 1280259961403769ULL,
+    1581497080160712ULL, 1477441080108824ULL, 218428165202767ULL, 1970598141278907ULL,
+    643366736173069ULL, 2167909426804014ULL, 834993711408259ULL, 1922437166463212ULL,
+    1900036281472252ULL, 513794844386304ULL, 1297904164900114ULL, 1147626295373268ULL,
+    1910101606251299ULL, 182933838633381ULL, 806229530787362ULL, 155511666433200ULL,
+    290522463375462ULL, 534373523491751ULL, 1302938814480515ULL, 1664979184120445ULL,
+    304235649499423ULL, 339284524318609ULL, 1881717946973483ULL, 1670802286833842ULL,
+    2223637120675737ULL, 135818919485814ULL, 1144856572842792ULL, 2234981613434386ULL,
+    963917024969826ULL, 402275378284993ULL, 141532417412170ULL, 921537468739387ULL,
+    963905069722607ULL, 1405442890733358ULL, 1567763927164655ULL, 1664776329195930ULL,
+    2095924165508507ULL, 994243110271379ULL, 1243925610609353ULL, 1029845815569727ULL,
+    1001968867985629ULL, 170368934002484ULL, 1100906131583801ULL, 1825190326449569ULL,
+    1462285121182096ULL, 1545240767016377ULL, 797859025652273ULL, 1062758326657530ULL,
+    1125600735118266ULL, 739325756774527ULL, 1420144485966996ULL, 1915492743426702ULL,
+    752968196344993ULL, 882156396938351ULL, 1909097048763227ULL, 849058590685611ULL,
+    840754951388500ULL, 1832926948808323ULL, 2023317100075297ULL, 322382745442827ULL,
+    1569741341737601ULL, 1678986113194987ULL, 757598994581938ULL, 29678659580705ULL,
+    1239680935977986ULL, 1509239427168474ULL, 1055981929287006ULL, 1894085471158693ULL,
+    916486225488490ULL, 642168890366120ULL, 300453362620010ULL, 1858797242721481ULL,
+    2077989823177130ULL, 510228455273334ULL, 1473284798689270ULL, 5173934574301ULL,
+    765285232030050ULL, 1007154707631065ULL, 1862128712885972ULL, 168873464821340ULL,
+    1967853269759318ULL, 1489896018263031ULL, 592451806166369ULL, 1242298565603883ULL,
+    1838918921339058ULL, 697532763910695ULL, 294335466239059ULL, 135687058387449ULL,
+    2133734403874176ULL, 2121911143127699ULL, 20222476737364ULL, 1200824626476747ULL,
+    1397731736540791ULL, 702378430231418ULL, 59059527640068ULL, 460992547183981ULL,
+    1016125857842765ULL, 1273530839608957ULL, 96724128829301ULL, 1313433042425233ULL,
+    3543822857227ULL, 761975685357118ULL, 110417360745248ULL, 1079634164577663ULL,
+    2044574510020457ULL, 338709058603120ULL, 94541336042799ULL, 127963233585039ULL,
+    94427896272258ULL, 1143501979342182ULL, 1217958006212230ULL, 2153887831492134ULL,
+    1519219513255575ULL, 251793195454181ULL, 392517349345200ULL, 1507033011868881ULL,
+    2208494254670752ULL, 1364389582694359ULL, 2214069430728063ULL, 1272814257105752ULL,
+    741450148906352ULL, 1105776675555685ULL, 824447222014984ULL, 528745219306376ULL,
+    589427609121575ULL, 1501786838809155ULL, 379067373073147ULL, 184909476589356ULL,
+    1346887560616185ULL, 1932023742314082ULL, 1633302311869264ULL, 1685314821133069ULL,
+    1836610282047884ULL, 1595571594397150ULL, 615441688872198ULL, 1926435616702564ULL,
+    235632180396480ULL, 1051918343571810ULL, 2150570051687050ULL, 879198845408738ULL,
+    1443966275205464ULL, 481362545245088ULL, 512807443532642ULL, 641147578283480ULL,
+    1594276116945596ULL, 1844812743300602ULL, 2044559316019485ULL, 202620777969020ULL,
+    852992984136302ULL, 1500869642692910ULL, 1085216217052457ULL, 1736294372259758ULL,
+    2009666354486552ULL, 1262389020715248ULL, 1166527705256867ULL, 1409917450806036ULL,
+    1705819160057637ULL, 1116901782584378ULL, 1278460472285473ULL, 257879811360157ULL,
+    40314007176886ULL, 701309846749639ULL, 1380457676672777ULL, 631519782380272ULL,
+    1196339573466793ULL, 955537708940017ULL, 532725633381530ULL, 641190593731833ULL,
+    7214357153807ULL, 481922072107983ULL, 1634886189207352ULL, 1247659758261633ULL,
+    1655809614786430ULL, 43105797900223ULL, 76205809912607ULL, 1936575107455823ULL,
+    1107927314642236ULL, 2199986333469333ULL, 802974829322510ULL, 718173128143482ULL,
+    539385184235615ULL, 2075693785611221ULL, 953281147333690ULL, 1623571637172587ULL,
+    655274535022250ULL, 1568078078819021ULL, 101142125049712ULL, 1488441673350881ULL,
+    1457969561944515ULL, 1492622544287712ULL, 2041460689280803ULL, 1961848091392887ULL,
+    461003520846938ULL, 934728060399807ULL, 117723291519705ULL, 1027773762863526ULL,
+    56765304991567ULL, 2184028379550479ULL, 1768767711894030ULL, 1304432068983172ULL,
+    498080974452325ULL, 2134905654858163ULL, 1446137427202647ULL, 551613831549590ULL,
+    680288767054205ULL, 1278113339140386ULL, 378149431842614ULL, 80520494426960ULL,
+    2080985256348782ULL, 673432591799820ULL, 739189463724560ULL, 1847191452197509ULL,
+    527737312871602ULL, 477609358840073ULL, 1891633072677946ULL, 1841456828278466ULL,
+    2242502936489002ULL, 524791829362709ULL, 276648168514036ULL, 991706903257619ULL,
+    512580228297906ULL, 1216855104975946ULL, 67030930303149ULL, 769593945208213ULL,
+    2048873385103577ULL, 455635274123107ULL, 2077404927176696ULL, 1803539634652306ULL,
+    1837579953843417ULL, 1564240068662828ULL, 1964310918970435ULL, 832822906252492ULL,
+    1516044634195010ULL, 770571447506889ULL, 602215152486818ULL, 1760828333136947ULL,
+    730156776030376ULL
   };
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_g_pow2_192_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)1129953239743101U, (uint64_t)1240339163956160U,
-    (uint64_t)61002583352401U, (uint64_t)2017604552196030U, (uint64_t)1576867829229863U,
-    (uint64_t)1508654942849389U, (uint64_t)270111619664077U, (uint64_t)1253097517254054U,
-    (uint64_t)721798270973250U, (uint64_t)161923365415298U, (uint64_t)828530877526011U,
-    (uint64_t)1494851059386763U, (uint64_t)662034171193976U, (uint64_t)1315349646974670U,
-    (uint64_t)2199229517308806U, (uint64_t)497078277852673U, (uint64_t)1310507715989956U,
-    (uint64_t)1881315714002105U, (uint64_t)2214039404983803U, (uint64_t)1331036420272667U,
-    (uint64_t)296286697520787U, (uint64_t)1179367922639127U, (uint64_t)25348441419697U,
-    (uint64_t)2200984961703188U, (uint64_t)150893128908291U, (uint64_t)1978614888570852U,
-    (uint64_t)1539657347172046U, (uint64_t)553810196523619U, (uint64_t)246017573977646U,
-    (uint64_t)1440448985385485U, (uint64_t)346049108099981U, (uint64_t)601166606218546U,
-    (uint64_t)855822004151713U, (uint64_t)1957521326383188U, (uint64_t)1114240380430887U,
-    (uint64_t)1349639675122048U, (uint64_t)957375954499040U, (uint64_t)111551795360136U,
-    (uint64_t)618586733648988U, (uint64_t)490708840688866U, (uint64_t)1267002049697314U,
-    (uint64_t)1130723224930028U, (uint64_t)215603029480828U, (uint64_t)1277138555414710U,
-    (uint64_t)1556750324971322U, (uint64_t)1407903521793741U, (uint64_t)1836836546590749U,
-    (uint64_t)576500297444199U, (uint64_t)2074707599091135U, (uint64_t)1826239864380012U,
-    (uint64_t)1935365705983312U, (uint64_t)239501825683682U, (uint64_t)1594236669034980U,
-    (uint64_t)1283078975055301U, (uint64_t)856745636255925U, (uint64_t)1342128647959981U,
-    (uint64_t)945216428379689U, (uint64_t)938746202496410U, (uint64_t)105775123333919U,
-    (uint64_t)1379852610117266U, (uint64_t)1770216827500275U, (uint64_t)1016017267535704U,
-    (uint64_t)1902885522469532U, (uint64_t)994184703730489U, (uint64_t)2227487538793763U,
-    (uint64_t)53155967096055U, (uint64_t)1264120808114350U, (uint64_t)1334928769376729U,
-    (uint64_t)393911808079997U, (uint64_t)826229239481845U, (uint64_t)1827903006733192U,
-    (uint64_t)1449283706008465U, (uint64_t)1258040415217849U, (uint64_t)1641484112868370U,
-    (uint64_t)1140150841968176U, (uint64_t)391113338021313U, (uint64_t)162138667815833U,
-    (uint64_t)742204396566060U, (uint64_t)110709233440557U, (uint64_t)90179377432917U,
-    (uint64_t)530511949644489U, (uint64_t)911568635552279U, (uint64_t)135869304780166U,
-    (uint64_t)617719999563692U, (uint64_t)1802525001631319U, (uint64_t)1836394639510490U,
-    (uint64_t)1862739456475085U, (uint64_t)1378284444664288U, (uint64_t)1617882529391756U,
-    (uint64_t)876124429891172U, (uint64_t)1147654641445091U, (uint64_t)1476943370400542U,
-    (uint64_t)688601222759067U, (uint64_t)2120281968990205U, (uint64_t)1387113236912611U,
-    (uint64_t)2125245820685788U, (uint64_t)1030674016350092U, (uint64_t)1594684598654247U,
-    (uint64_t)1165939511879820U, (uint64_t)271499323244173U, (uint64_t)546587254515484U,
-    (uint64_t)945603425742936U, (uint64_t)1242252568170226U, (uint64_t)561598728058142U,
-    (uint64_t)604827091794712U, (uint64_t)19869753585186U, (uint64_t)565367744708915U,
-    (uint64_t)536755754533603U, (uint64_t)1767258313589487U, (uint64_t)907952975936127U,
-    (uint64_t)292851652613937U, (uint64_t)163573546237963U, (uint64_t)837601408384564U,
-    (uint64_t)591996990118301U, (uint64_t)2126051747693057U, (uint64_t)182247548824566U,
-    (uint64_t)908369044122868U, (uint64_t)1335442699947273U, (uint64_t)2234292296528612U,
-    (uint64_t)689537529333034U, (uint64_t)2174778663790714U, (uint64_t)1011407643592667U,
-    (uint64_t)1856130618715473U, (uint64_t)1557437221651741U, (uint64_t)2250285407006102U,
-    (uint64_t)1412384213410827U, (uint64_t)1428042038612456U, (uint64_t)962709733973660U,
-    (uint64_t)313995703125919U, (uint64_t)1844969155869325U, (uint64_t)787716782673657U,
-    (uint64_t)622504542173478U, (uint64_t)930119043384654U, (uint64_t)2128870043952488U,
-    (uint64_t)537781531479523U, (uint64_t)1556666269904940U, (uint64_t)417333635741346U,
-    (uint64_t)1986743846438415U, (uint64_t)877620478041197U, (uint64_t)2205624582983829U,
-    (uint64_t)595260668884488U, (uint64_t)2025159350373157U, (uint64_t)2091659716088235U,
-    (uint64_t)1423634716596391U, (uint64_t)653686638634080U, (uint64_t)1972388399989956U,
-    (uint64_t)795575741798014U, (uint64_t)889240107997846U, (uint64_t)1446156876910732U,
-    (uint64_t)1028507012221776U, (uint64_t)1071697574586478U, (uint64_t)1689630411899691U,
-    (uint64_t)604092816502174U, (uint64_t)1909917373896122U, (uint64_t)1602544877643837U,
-    (uint64_t)1227177032923867U, (uint64_t)62684197535630U, (uint64_t)186146290753883U,
-    (uint64_t)414449055316766U, (uint64_t)1560555880866750U, (uint64_t)157579947096755U,
-    (uint64_t)230526795502384U, (uint64_t)1197673369665894U, (uint64_t)593779215869037U,
-    (uint64_t)214638834474097U, (uint64_t)1796344443484478U, (uint64_t)493550548257317U,
-    (uint64_t)1628442824033694U, (uint64_t)1410811655893495U, (uint64_t)1009361960995171U,
-    (uint64_t)604736219740352U, (uint64_t)392445928555351U, (uint64_t)1254295770295706U,
-    (uint64_t)1958074535046128U, (uint64_t)508699942241019U, (uint64_t)739405911261325U,
-    (uint64_t)1678760393882409U, (uint64_t)517763708545996U, (uint64_t)640040257898722U,
-    (uint64_t)384966810872913U, (uint64_t)407454748380128U, (uint64_t)152604679407451U,
-    (uint64_t)185102854927662U, (uint64_t)1448175503649595U, (uint64_t)100328519208674U,
-    (uint64_t)1153263667012830U, (uint64_t)1643926437586490U, (uint64_t)609632142834154U,
-    (uint64_t)980984004749261U, (uint64_t)855290732258779U, (uint64_t)2186022163021506U,
-    (uint64_t)1254052618626070U, (uint64_t)1850030517182611U, (uint64_t)162348933090207U,
-    (uint64_t)1948712273679932U, (uint64_t)1331832516262191U, (uint64_t)1219400369175863U,
-    (uint64_t)89689036937483U, (uint64_t)1554886057235815U, (uint64_t)1520047528432789U,
-    (uint64_t)81263957652811U, (uint64_t)146612464257008U, (uint64_t)2207945627164163U,
-    (uint64_t)919846660682546U, (uint64_t)1925694087906686U, (uint64_t)2102027292388012U,
-    (uint64_t)887992003198635U, (uint64_t)1817924871537027U, (uint64_t)746660005584342U,
-    (uint64_t)753757153275525U, (uint64_t)91394270908699U, (uint64_t)511837226544151U,
-    (uint64_t)736341543649373U, (uint64_t)1256371121466367U, (uint64_t)1977778299551813U,
-    (uint64_t)817915174462263U, (uint64_t)1602323381418035U, (uint64_t)190035164572930U,
-    (uint64_t)603796401391181U, (uint64_t)2152666873671669U, (uint64_t)1813900316324112U,
-    (uint64_t)1292622433358041U, (uint64_t)888439870199892U, (uint64_t)978918155071994U,
-    (uint64_t)534184417909805U, (uint64_t)466460084317313U, (uint64_t)1275223140288685U,
-    (uint64_t)786407043883517U, (uint64_t)1620520623925754U, (uint64_t)1753625021290269U,
-    (uint64_t)751937175104525U, (uint64_t)905301961820613U, (uint64_t)697059847245437U,
-    (uint64_t)584919033981144U, (uint64_t)1272165506533156U, (uint64_t)1532180021450866U,
-    (uint64_t)1901407354005301U, (uint64_t)1421319720492586U, (uint64_t)2179081609765456U,
-    (uint64_t)2193253156667632U, (uint64_t)1080248329608584U, (uint64_t)2158422436462066U,
-    (uint64_t)759167597017850U, (uint64_t)545759071151285U, (uint64_t)641600428493698U,
-    (uint64_t)943791424499848U, (uint64_t)469571542427864U, (uint64_t)951117845222467U,
-    (uint64_t)1780538594373407U, (uint64_t)614611122040309U, (uint64_t)1354826131886963U,
-    (uint64_t)221898131992340U, (uint64_t)1145699723916219U, (uint64_t)798735379961769U,
-    (uint64_t)1843560518208287U, (uint64_t)1424523160161545U, (uint64_t)205549016574779U,
-    (uint64_t)2239491587362749U, (uint64_t)1918363582399888U, (uint64_t)1292183072788455U,
-    (uint64_t)1783513123192567U, (uint64_t)1584027954317205U, (uint64_t)1890421443925740U,
-    (uint64_t)1718459319874929U, (uint64_t)1522091040748809U, (uint64_t)399467600667219U,
-    (uint64_t)1870973059066576U, (uint64_t)287514433150348U, (uint64_t)1397845311152885U,
-    (uint64_t)1880440629872863U, (uint64_t)709302939340341U, (uint64_t)1813571361109209U,
-    (uint64_t)86598795876860U, (uint64_t)1146964554310612U, (uint64_t)1590956584862432U,
-    (uint64_t)2097004628155559U, (uint64_t)656227622102390U, (uint64_t)1808500445541891U,
-    (uint64_t)958336726523135U, (uint64_t)2007604569465975U, (uint64_t)313504950390997U,
-    (uint64_t)1399686004953620U, (uint64_t)1759732788465234U, (uint64_t)1562539721055836U,
-    (uint64_t)1575722765016293U, (uint64_t)793318366641259U, (uint64_t)443876859384887U,
-    (uint64_t)547308921989704U, (uint64_t)636698687503328U, (uint64_t)2179175835287340U,
-    (uint64_t)498333551718258U, (uint64_t)932248760026176U, (uint64_t)1612395686304653U,
-    (uint64_t)2179774103745626U, (uint64_t)1359658123541018U, (uint64_t)171488501802442U,
-    (uint64_t)1625034951791350U, (uint64_t)520196922773633U, (uint64_t)1873787546341877U,
-    (uint64_t)303457823885368U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1129953239743101ULL, 1240339163956160ULL, 61002583352401ULL,
+    2017604552196030ULL, 1576867829229863ULL, 1508654942849389ULL, 270111619664077ULL,
+    1253097517254054ULL, 721798270973250ULL, 161923365415298ULL, 828530877526011ULL,
+    1494851059386763ULL, 662034171193976ULL, 1315349646974670ULL, 2199229517308806ULL,
+    497078277852673ULL, 1310507715989956ULL, 1881315714002105ULL, 2214039404983803ULL,
+    1331036420272667ULL, 296286697520787ULL, 1179367922639127ULL, 25348441419697ULL,
+    2200984961703188ULL, 150893128908291ULL, 1978614888570852ULL, 1539657347172046ULL,
+    553810196523619ULL, 246017573977646ULL, 1440448985385485ULL, 346049108099981ULL,
+    601166606218546ULL, 855822004151713ULL, 1957521326383188ULL, 1114240380430887ULL,
+    1349639675122048ULL, 957375954499040ULL, 111551795360136ULL, 618586733648988ULL,
+    490708840688866ULL, 1267002049697314ULL, 1130723224930028ULL, 215603029480828ULL,
+    1277138555414710ULL, 1556750324971322ULL, 1407903521793741ULL, 1836836546590749ULL,
+    576500297444199ULL, 2074707599091135ULL, 1826239864380012ULL, 1935365705983312ULL,
+    239501825683682ULL, 1594236669034980ULL, 1283078975055301ULL, 856745636255925ULL,
+    1342128647959981ULL, 945216428379689ULL, 938746202496410ULL, 105775123333919ULL,
+    1379852610117266ULL, 1770216827500275ULL, 1016017267535704ULL, 1902885522469532ULL,
+    994184703730489ULL, 2227487538793763ULL, 53155967096055ULL, 1264120808114350ULL,
+    1334928769376729ULL, 393911808079997ULL, 826229239481845ULL, 1827903006733192ULL,
+    1449283706008465ULL, 1258040415217849ULL, 1641484112868370ULL, 1140150841968176ULL,
+    391113338021313ULL, 162138667815833ULL, 742204396566060ULL, 110709233440557ULL,
+    90179377432917ULL, 530511949644489ULL, 911568635552279ULL, 135869304780166ULL,
+    617719999563692ULL, 1802525001631319ULL, 1836394639510490ULL, 1862739456475085ULL,
+    1378284444664288ULL, 1617882529391756ULL, 876124429891172ULL, 1147654641445091ULL,
+    1476943370400542ULL, 688601222759067ULL, 2120281968990205ULL, 1387113236912611ULL,
+    2125245820685788ULL, 1030674016350092ULL, 1594684598654247ULL, 1165939511879820ULL,
+    271499323244173ULL, 546587254515484ULL, 945603425742936ULL, 1242252568170226ULL,
+    561598728058142ULL, 604827091794712ULL, 19869753585186ULL, 565367744708915ULL,
+    536755754533603ULL, 1767258313589487ULL, 907952975936127ULL, 292851652613937ULL,
+    163573546237963ULL, 837601408384564ULL, 591996990118301ULL, 2126051747693057ULL,
+    182247548824566ULL, 908369044122868ULL, 1335442699947273ULL, 2234292296528612ULL,
+    689537529333034ULL, 2174778663790714ULL, 1011407643592667ULL, 1856130618715473ULL,
+    1557437221651741ULL, 2250285407006102ULL, 1412384213410827ULL, 1428042038612456ULL,
+    962709733973660ULL, 313995703125919ULL, 1844969155869325ULL, 787716782673657ULL,
+    622504542173478ULL, 930119043384654ULL, 2128870043952488ULL, 537781531479523ULL,
+    1556666269904940ULL, 417333635741346ULL, 1986743846438415ULL, 877620478041197ULL,
+    2205624582983829ULL, 595260668884488ULL, 2025159350373157ULL, 2091659716088235ULL,
+    1423634716596391ULL, 653686638634080ULL, 1972388399989956ULL, 795575741798014ULL,
+    889240107997846ULL, 1446156876910732ULL, 1028507012221776ULL, 1071697574586478ULL,
+    1689630411899691ULL, 604092816502174ULL, 1909917373896122ULL, 1602544877643837ULL,
+    1227177032923867ULL, 62684197535630ULL, 186146290753883ULL, 414449055316766ULL,
+    1560555880866750ULL, 157579947096755ULL, 230526795502384ULL, 1197673369665894ULL,
+    593779215869037ULL, 214638834474097ULL, 1796344443484478ULL, 493550548257317ULL,
+    1628442824033694ULL, 1410811655893495ULL, 1009361960995171ULL, 604736219740352ULL,
+    392445928555351ULL, 1254295770295706ULL, 1958074535046128ULL, 508699942241019ULL,
+    739405911261325ULL, 1678760393882409ULL, 517763708545996ULL, 640040257898722ULL,
+    384966810872913ULL, 407454748380128ULL, 152604679407451ULL, 185102854927662ULL,
+    1448175503649595ULL, 100328519208674ULL, 1153263667012830ULL, 1643926437586490ULL,
+    609632142834154ULL, 980984004749261ULL, 855290732258779ULL, 2186022163021506ULL,
+    1254052618626070ULL, 1850030517182611ULL, 162348933090207ULL, 1948712273679932ULL,
+    1331832516262191ULL, 1219400369175863ULL, 89689036937483ULL, 1554886057235815ULL,
+    1520047528432789ULL, 81263957652811ULL, 146612464257008ULL, 2207945627164163ULL,
+    919846660682546ULL, 1925694087906686ULL, 2102027292388012ULL, 887992003198635ULL,
+    1817924871537027ULL, 746660005584342ULL, 753757153275525ULL, 91394270908699ULL,
+    511837226544151ULL, 736341543649373ULL, 1256371121466367ULL, 1977778299551813ULL,
+    817915174462263ULL, 1602323381418035ULL, 190035164572930ULL, 603796401391181ULL,
+    2152666873671669ULL, 1813900316324112ULL, 1292622433358041ULL, 888439870199892ULL,
+    978918155071994ULL, 534184417909805ULL, 466460084317313ULL, 1275223140288685ULL,
+    786407043883517ULL, 1620520623925754ULL, 1753625021290269ULL, 751937175104525ULL,
+    905301961820613ULL, 697059847245437ULL, 584919033981144ULL, 1272165506533156ULL,
+    1532180021450866ULL, 1901407354005301ULL, 1421319720492586ULL, 2179081609765456ULL,
+    2193253156667632ULL, 1080248329608584ULL, 2158422436462066ULL, 759167597017850ULL,
+    545759071151285ULL, 641600428493698ULL, 943791424499848ULL, 469571542427864ULL,
+    951117845222467ULL, 1780538594373407ULL, 614611122040309ULL, 1354826131886963ULL,
+    221898131992340ULL, 1145699723916219ULL, 798735379961769ULL, 1843560518208287ULL,
+    1424523160161545ULL, 205549016574779ULL, 2239491587362749ULL, 1918363582399888ULL,
+    1292183072788455ULL, 1783513123192567ULL, 1584027954317205ULL, 1890421443925740ULL,
+    1718459319874929ULL, 1522091040748809ULL, 399467600667219ULL, 1870973059066576ULL,
+    287514433150348ULL, 1397845311152885ULL, 1880440629872863ULL, 709302939340341ULL,
+    1813571361109209ULL, 86598795876860ULL, 1146964554310612ULL, 1590956584862432ULL,
+    2097004628155559ULL, 656227622102390ULL, 1808500445541891ULL, 958336726523135ULL,
+    2007604569465975ULL, 313504950390997ULL, 1399686004953620ULL, 1759732788465234ULL,
+    1562539721055836ULL, 1575722765016293ULL, 793318366641259ULL, 443876859384887ULL,
+    547308921989704ULL, 636698687503328ULL, 2179175835287340ULL, 498333551718258ULL,
+    932248760026176ULL, 1612395686304653ULL, 2179774103745626ULL, 1359658123541018ULL,
+    171488501802442ULL, 1625034951791350ULL, 520196922773633ULL, 1873787546341877ULL,
+    303457823885368ULL
   };
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5[640U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)1738742601995546U, (uint64_t)1146398526822698U,
-    (uint64_t)2070867633025821U, (uint64_t)562264141797630U, (uint64_t)587772402128613U,
-    (uint64_t)1801439850948184U, (uint64_t)1351079888211148U, (uint64_t)450359962737049U,
-    (uint64_t)900719925474099U, (uint64_t)1801439850948198U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1841354044333475U,
-    (uint64_t)16398895984059U, (uint64_t)755974180946558U, (uint64_t)900171276175154U,
-    (uint64_t)1821297809914039U, (uint64_t)1661154287933054U, (uint64_t)284530020860578U,
-    (uint64_t)1390261174866914U, (uint64_t)1524110943907984U, (uint64_t)1045603498418422U,
-    (uint64_t)928651508580478U, (uint64_t)1383326941296346U, (uint64_t)961937908925785U,
-    (uint64_t)80455759693706U, (uint64_t)904734540352947U, (uint64_t)1507481815385608U,
-    (uint64_t)2223447444246085U, (uint64_t)1083941587175919U, (uint64_t)2059929906842505U,
-    (uint64_t)1581435440146976U, (uint64_t)782730187692425U, (uint64_t)9928394897574U,
-    (uint64_t)1539449519985236U, (uint64_t)1923587931078510U, (uint64_t)552919286076056U,
-    (uint64_t)376925408065760U, (uint64_t)447320488831784U, (uint64_t)1362918338468019U,
-    (uint64_t)1470031896696846U, (uint64_t)2189796996539902U, (uint64_t)1337552949959847U,
-    (uint64_t)1762287177775726U, (uint64_t)237994495816815U, (uint64_t)1277840395970544U,
-    (uint64_t)543972849007241U, (uint64_t)1224692671618814U, (uint64_t)162359533289271U,
-    (uint64_t)282240927125249U, (uint64_t)586909166382289U, (uint64_t)17726488197838U,
-    (uint64_t)377014554985659U, (uint64_t)1433835303052512U, (uint64_t)702061469493692U,
-    (uint64_t)1142253108318154U, (uint64_t)318297794307551U, (uint64_t)954362646308543U,
-    (uint64_t)517363881452320U, (uint64_t)1868013482130416U, (uint64_t)262562472373260U,
-    (uint64_t)902232853249919U, (uint64_t)2107343057055746U, (uint64_t)462368348619024U,
-    (uint64_t)1893758677092974U, (uint64_t)2177729767846389U, (uint64_t)2168532543559143U,
-    (uint64_t)443867094639821U, (uint64_t)730169342581022U, (uint64_t)1564589016879755U,
-    (uint64_t)51218195700649U, (uint64_t)76684578423745U, (uint64_t)560266272480743U,
-    (uint64_t)922517457707697U, (uint64_t)2066645939860874U, (uint64_t)1318277348414638U,
-    (uint64_t)1576726809084003U, (uint64_t)1817337608563665U, (uint64_t)1874240939237666U,
-    (uint64_t)754733726333910U, (uint64_t)97085310406474U, (uint64_t)751148364309235U,
-    (uint64_t)1622159695715187U, (uint64_t)1444098819684916U, (uint64_t)130920805558089U,
-    (uint64_t)1260449179085308U, (uint64_t)1860021740768461U, (uint64_t)110052860348509U,
-    (uint64_t)193830891643810U, (uint64_t)164148413933881U, (uint64_t)180017794795332U,
-    (uint64_t)1523506525254651U, (uint64_t)465981629225956U, (uint64_t)559733514964572U,
-    (uint64_t)1279624874416974U, (uint64_t)2026642326892306U, (uint64_t)1425156829982409U,
-    (uint64_t)2160936383793147U, (uint64_t)1061870624975247U, (uint64_t)2023497043036941U,
-    (uint64_t)117942212883190U, (uint64_t)490339622800774U, (uint64_t)1729931303146295U,
-    (uint64_t)422305932971074U, (uint64_t)529103152793096U, (uint64_t)1211973233775992U,
-    (uint64_t)721364955929681U, (uint64_t)1497674430438813U, (uint64_t)342545521275073U,
-    (uint64_t)2102107575279372U, (uint64_t)2108462244669966U, (uint64_t)1382582406064082U,
-    (uint64_t)2206396818383323U, (uint64_t)2109093268641147U, (uint64_t)10809845110983U,
-    (uint64_t)1605176920880099U, (uint64_t)744640650753946U, (uint64_t)1712758897518129U,
-    (uint64_t)373410811281809U, (uint64_t)648838265800209U, (uint64_t)813058095530999U,
-    (uint64_t)513987632620169U, (uint64_t)465516160703329U, (uint64_t)2136322186126330U,
-    (uint64_t)1979645899422932U, (uint64_t)1197131006470786U, (uint64_t)1467836664863979U,
-    (uint64_t)1340751381374628U, (uint64_t)1810066212667962U, (uint64_t)1009933588225499U,
-    (uint64_t)1106129188080873U, (uint64_t)1388980405213901U, (uint64_t)533719246598044U,
-    (uint64_t)1169435803073277U, (uint64_t)198920999285821U, (uint64_t)487492330629854U,
-    (uint64_t)1807093008537778U, (uint64_t)1540899012923865U, (uint64_t)2075080271659867U,
-    (uint64_t)1527990806921523U, (uint64_t)1323728742908002U, (uint64_t)1568595959608205U,
-    (uint64_t)1388032187497212U, (uint64_t)2026968840050568U, (uint64_t)1396591153295755U,
-    (uint64_t)820416950170901U, (uint64_t)520060313205582U, (uint64_t)2016404325094901U,
-    (uint64_t)1584709677868520U, (uint64_t)272161374469956U, (uint64_t)1567188603996816U,
-    (uint64_t)1986160530078221U, (uint64_t)553930264324589U, (uint64_t)1058426729027503U,
-    (uint64_t)8762762886675U, (uint64_t)2216098143382988U, (uint64_t)1835145266889223U,
-    (uint64_t)1712936431558441U, (uint64_t)1017009937844974U, (uint64_t)585361667812740U,
-    (uint64_t)2114711541628181U, (uint64_t)2238729632971439U, (uint64_t)121257546253072U,
-    (uint64_t)847154149018345U, (uint64_t)211972965476684U, (uint64_t)287499084460129U,
-    (uint64_t)2098247259180197U, (uint64_t)839070411583329U, (uint64_t)339551619574372U,
-    (uint64_t)1432951287640743U, (uint64_t)526481249498942U, (uint64_t)931991661905195U,
-    (uint64_t)1884279965674487U, (uint64_t)200486405604411U, (uint64_t)364173020594788U,
-    (uint64_t)518034455936955U, (uint64_t)1085564703965501U, (uint64_t)16030410467927U,
-    (uint64_t)604865933167613U, (uint64_t)1695298441093964U, (uint64_t)498856548116159U,
-    (uint64_t)2193030062787034U, (uint64_t)1706339802964179U, (uint64_t)1721199073493888U,
-    (uint64_t)820740951039755U, (uint64_t)1216053436896834U, (uint64_t)23954895815139U,
-    (uint64_t)1662515208920491U, (uint64_t)1705443427511899U, (uint64_t)1957928899570365U,
-    (uint64_t)1189636258255725U, (uint64_t)1795695471103809U, (uint64_t)1691191297654118U,
-    (uint64_t)282402585374360U, (uint64_t)460405330264832U, (uint64_t)63765529445733U,
-    (uint64_t)469763447404473U, (uint64_t)733607089694996U, (uint64_t)685410420186959U,
-    (uint64_t)1096682630419738U, (uint64_t)1162548510542362U, (uint64_t)1020949526456676U,
-    (uint64_t)1211660396870573U, (uint64_t)613126398222696U, (uint64_t)1117829165843251U,
-    (uint64_t)742432540886650U, (uint64_t)1483755088010658U, (uint64_t)942392007134474U,
-    (uint64_t)1447834130944107U, (uint64_t)489368274863410U, (uint64_t)23192985544898U,
-    (uint64_t)648442406146160U, (uint64_t)785438843373876U, (uint64_t)249464684645238U,
-    (uint64_t)170494608205618U, (uint64_t)335112827260550U, (uint64_t)1462050123162735U,
-    (uint64_t)1084803668439016U, (uint64_t)853459233600325U, (uint64_t)215777728187495U,
-    (uint64_t)1965759433526974U, (uint64_t)1349482894446537U, (uint64_t)694163317612871U,
-    (uint64_t)860536766165036U, (uint64_t)1178788094084321U, (uint64_t)1652739626626996U,
-    (uint64_t)2115723946388185U, (uint64_t)1577204379094664U, (uint64_t)1083882859023240U,
-    (uint64_t)1768759143381635U, (uint64_t)1737180992507258U, (uint64_t)246054513922239U,
-    (uint64_t)577253134087234U, (uint64_t)356340280578042U, (uint64_t)1638917769925142U,
-    (uint64_t)223550348130103U, (uint64_t)470592666638765U, (uint64_t)22663573966996U,
-    (uint64_t)596552461152400U, (uint64_t)364143537069499U, (uint64_t)3942119457699U,
-    (uint64_t)107951982889287U, (uint64_t)1843471406713209U, (uint64_t)1625773041610986U,
-    (uint64_t)1466141092501702U, (uint64_t)1043024095021271U, (uint64_t)310429964047508U,
-    (uint64_t)98559121500372U, (uint64_t)152746933782868U, (uint64_t)259407205078261U,
-    (uint64_t)828123093322585U, (uint64_t)1576847274280091U, (uint64_t)1170871375757302U,
-    (uint64_t)1588856194642775U, (uint64_t)984767822341977U, (uint64_t)1141497997993760U,
-    (uint64_t)809325345150796U, (uint64_t)1879837728202511U, (uint64_t)201340910657893U,
-    (uint64_t)1079157558888483U, (uint64_t)1052373448588065U, (uint64_t)1732036202501778U,
-    (uint64_t)2105292670328445U, (uint64_t)679751387312402U, (uint64_t)1679682144926229U,
-    (uint64_t)1695823455818780U, (uint64_t)498852317075849U, (uint64_t)1786555067788433U,
-    (uint64_t)1670727545779425U, (uint64_t)117945875433544U, (uint64_t)407939139781844U,
-    (uint64_t)854632120023778U, (uint64_t)1413383148360437U, (uint64_t)286030901733673U,
-    (uint64_t)1207361858071196U, (uint64_t)461340408181417U, (uint64_t)1096919590360164U,
-    (uint64_t)1837594897475685U, (uint64_t)533755561544165U, (uint64_t)1638688042247712U,
-    (uint64_t)1431653684793005U, (uint64_t)1036458538873559U, (uint64_t)390822120341779U,
-    (uint64_t)1920929837111618U, (uint64_t)543426740024168U, (uint64_t)645751357799929U,
-    (uint64_t)2245025632994463U, (uint64_t)1550778638076452U, (uint64_t)223738153459949U,
-    (uint64_t)1337209385492033U, (uint64_t)1276967236456531U, (uint64_t)1463815821063071U,
-    (uint64_t)2070620870191473U, (uint64_t)1199170709413753U, (uint64_t)273230877394166U,
-    (uint64_t)1873264887608046U, (uint64_t)890877152910775U, (uint64_t)983226445635730U,
-    (uint64_t)44873798519521U, (uint64_t)697147127512130U, (uint64_t)961631038239304U,
-    (uint64_t)709966160696826U, (uint64_t)1706677689540366U, (uint64_t)502782733796035U,
-    (uint64_t)812545535346033U, (uint64_t)1693622521296452U, (uint64_t)1955813093002510U,
-    (uint64_t)1259937612881362U, (uint64_t)1873032503803559U, (uint64_t)1140330566016428U,
-    (uint64_t)1675726082440190U, (uint64_t)60029928909786U, (uint64_t)170335608866763U,
-    (uint64_t)766444312315022U, (uint64_t)2025049511434113U, (uint64_t)2200845622430647U,
-    (uint64_t)1201269851450408U, (uint64_t)590071752404907U, (uint64_t)1400995030286946U,
-    (uint64_t)2152637413853822U, (uint64_t)2108495473841983U, (uint64_t)3855406710349U,
-    (uint64_t)1726137673168580U, (uint64_t)51004317200100U, (uint64_t)1749082328586939U,
-    (uint64_t)1704088976144558U, (uint64_t)1977318954775118U, (uint64_t)2062602253162400U,
-    (uint64_t)948062503217479U, (uint64_t)361953965048030U, (uint64_t)1528264887238440U,
-    (uint64_t)62582552172290U, (uint64_t)2241602163389280U, (uint64_t)156385388121765U,
-    (uint64_t)2124100319761492U, (uint64_t)388928050571382U, (uint64_t)1556123596922727U,
-    (uint64_t)979310669812384U, (uint64_t)113043855206104U, (uint64_t)2023223924825469U,
-    (uint64_t)643651703263034U, (uint64_t)2234446903655540U, (uint64_t)1577241261424997U,
-    (uint64_t)860253174523845U, (uint64_t)1691026473082448U, (uint64_t)1091672764933872U,
-    (uint64_t)1957463109756365U, (uint64_t)530699502660193U, (uint64_t)349587141723569U,
-    (uint64_t)674661681919563U, (uint64_t)1633727303856240U, (uint64_t)708909037922144U,
-    (uint64_t)2160722508518119U, (uint64_t)1302188051602540U, (uint64_t)976114603845777U,
-    (uint64_t)120004758721939U, (uint64_t)1681630708873780U, (uint64_t)622274095069244U,
-    (uint64_t)1822346309016698U, (uint64_t)1100921177951904U, (uint64_t)2216952659181677U,
-    (uint64_t)1844020550362490U, (uint64_t)1976451368365774U, (uint64_t)1321101422068822U,
-    (uint64_t)1189859436282668U, (uint64_t)2008801879735257U, (uint64_t)2219413454333565U,
-    (uint64_t)424288774231098U, (uint64_t)359793146977912U, (uint64_t)270293357948703U,
-    (uint64_t)587226003677000U, (uint64_t)1482071926139945U, (uint64_t)1419630774650359U,
-    (uint64_t)1104739070570175U, (uint64_t)1662129023224130U, (uint64_t)1609203612533411U,
-    (uint64_t)1250932720691980U, (uint64_t)95215711818495U, (uint64_t)498746909028150U,
-    (uint64_t)158151296991874U, (uint64_t)1201379988527734U, (uint64_t)561599945143989U,
-    (uint64_t)2211577425617888U, (uint64_t)2166577612206324U, (uint64_t)1057590354233512U,
-    (uint64_t)1968123280416769U, (uint64_t)1316586165401313U, (uint64_t)762728164447634U,
-    (uint64_t)2045395244316047U, (uint64_t)1531796898725716U, (uint64_t)315385971670425U,
-    (uint64_t)1109421039396756U, (uint64_t)2183635256408562U, (uint64_t)1896751252659461U,
-    (uint64_t)840236037179080U, (uint64_t)796245792277211U, (uint64_t)508345890111193U,
-    (uint64_t)1275386465287222U, (uint64_t)513560822858784U, (uint64_t)1784735733120313U,
-    (uint64_t)1346467478899695U, (uint64_t)601125231208417U, (uint64_t)701076661112726U,
-    (uint64_t)1841998436455089U, (uint64_t)1156768600940434U, (uint64_t)1967853462343221U,
-    (uint64_t)2178318463061452U, (uint64_t)481885520752741U, (uint64_t)675262828640945U,
-    (uint64_t)1033539418596582U, (uint64_t)1743329872635846U, (uint64_t)159322641251283U,
-    (uint64_t)1573076470127113U, (uint64_t)954827619308195U, (uint64_t)778834750662635U,
-    (uint64_t)619912782122617U, (uint64_t)515681498488209U, (uint64_t)1675866144246843U,
-    (uint64_t)811716020969981U, (uint64_t)1125515272217398U, (uint64_t)1398917918287342U,
-    (uint64_t)1301680949183175U, (uint64_t)726474739583734U, (uint64_t)587246193475200U,
-    (uint64_t)1096581582611864U, (uint64_t)1469911826213486U, (uint64_t)1990099711206364U,
-    (uint64_t)1256496099816508U, (uint64_t)2019924615195672U, (uint64_t)1251232456707555U,
-    (uint64_t)2042971196009755U, (uint64_t)214061878479265U, (uint64_t)115385726395472U,
-    (uint64_t)1677875239524132U, (uint64_t)756888883383540U, (uint64_t)1153862117756233U,
-    (uint64_t)503391530851096U, (uint64_t)946070017477513U, (uint64_t)1878319040542579U,
-    (uint64_t)1101349418586920U, (uint64_t)793245696431613U, (uint64_t)397920495357645U,
-    (uint64_t)2174023872951112U, (uint64_t)1517867915189593U, (uint64_t)1829855041462995U,
-    (uint64_t)1046709983503619U, (uint64_t)424081940711857U, (uint64_t)2112438073094647U,
-    (uint64_t)1504338467349861U, (uint64_t)2244574127374532U, (uint64_t)2136937537441911U,
-    (uint64_t)1741150838990304U, (uint64_t)25894628400571U, (uint64_t)512213526781178U,
-    (uint64_t)1168384260796379U, (uint64_t)1424607682379833U, (uint64_t)938677789731564U,
-    (uint64_t)872882241891896U, (uint64_t)1713199397007700U, (uint64_t)1410496326218359U,
-    (uint64_t)854379752407031U, (uint64_t)465141611727634U, (uint64_t)315176937037857U,
-    (uint64_t)1020115054571233U, (uint64_t)1856290111077229U, (uint64_t)2028366269898204U,
-    (uint64_t)1432980880307543U, (uint64_t)469932710425448U, (uint64_t)581165267592247U,
-    (uint64_t)496399148156603U, (uint64_t)2063435226705903U, (uint64_t)2116841086237705U,
-    (uint64_t)498272567217048U, (uint64_t)1829438076967906U, (uint64_t)1573925801278491U,
-    (uint64_t)460763576329867U, (uint64_t)1705264723728225U, (uint64_t)999514866082412U,
-    (uint64_t)29635061779362U, (uint64_t)1884233592281020U, (uint64_t)1449755591461338U,
-    (uint64_t)42579292783222U, (uint64_t)1869504355369200U, (uint64_t)495506004805251U,
-    (uint64_t)264073104888427U, (uint64_t)2088880861028612U, (uint64_t)104646456386576U,
-    (uint64_t)1258445191399967U, (uint64_t)1348736801545799U, (uint64_t)2068276361286613U,
-    (uint64_t)884897216646374U, (uint64_t)922387476801376U, (uint64_t)1043886580402805U,
-    (uint64_t)1240883498470831U, (uint64_t)1601554651937110U, (uint64_t)804382935289482U,
-    (uint64_t)512379564477239U, (uint64_t)1466384519077032U, (uint64_t)1280698500238386U,
-    (uint64_t)211303836685749U, (uint64_t)2081725624793803U, (uint64_t)545247644516879U,
-    (uint64_t)215313359330384U, (uint64_t)286479751145614U, (uint64_t)2213650281751636U,
-    (uint64_t)2164927945999874U, (uint64_t)2072162991540882U, (uint64_t)1443769115444779U,
-    (uint64_t)1581473274363095U, (uint64_t)434633875922699U, (uint64_t)340456055781599U,
-    (uint64_t)373043091080189U, (uint64_t)839476566531776U, (uint64_t)1856706858509978U,
-    (uint64_t)931616224909153U, (uint64_t)1888181317414065U, (uint64_t)213654322650262U,
-    (uint64_t)1161078103416244U, (uint64_t)1822042328851513U, (uint64_t)915817709028812U,
-    (uint64_t)1828297056698188U, (uint64_t)1212017130909403U, (uint64_t)60258343247333U,
-    (uint64_t)342085800008230U, (uint64_t)930240559508270U, (uint64_t)1549884999174952U,
-    (uint64_t)809895264249462U, (uint64_t)184726257947682U, (uint64_t)1157065433504828U,
-    (uint64_t)1209999630381477U, (uint64_t)999920399374391U, (uint64_t)1714770150788163U,
-    (uint64_t)2026130985413228U, (uint64_t)506776632883140U, (uint64_t)1349042668246528U,
-    (uint64_t)1937232292976967U, (uint64_t)942302637530730U, (uint64_t)160211904766226U,
-    (uint64_t)1042724500438571U, (uint64_t)212454865139142U, (uint64_t)244104425172642U,
-    (uint64_t)1376990622387496U, (uint64_t)76126752421227U, (uint64_t)1027540886376422U,
-    (uint64_t)1912210655133026U, (uint64_t)13410411589575U, (uint64_t)1475856708587773U,
-    (uint64_t)615563352691682U, (uint64_t)1446629324872644U, (uint64_t)1683670301784014U,
-    (uint64_t)1049873327197127U, (uint64_t)1826401704084838U, (uint64_t)2032577048760775U,
-    (uint64_t)1922203607878853U, (uint64_t)836708788764806U, (uint64_t)2193084654695012U,
-    (uint64_t)1342923183256659U, (uint64_t)849356986294271U, (uint64_t)1228863973965618U,
-    (uint64_t)94886161081867U, (uint64_t)1423288430204892U, (uint64_t)2016167528707016U,
-    (uint64_t)1633187660972877U, (uint64_t)1550621242301752U, (uint64_t)340630244512994U,
-    (uint64_t)2103577710806901U, (uint64_t)221625016538931U, (uint64_t)421544147350960U,
-    (uint64_t)580428704555156U, (uint64_t)1479831381265617U, (uint64_t)518057926544698U,
-    (uint64_t)955027348790630U, (uint64_t)1326749172561598U, (uint64_t)1118304625755967U,
-    (uint64_t)1994005916095176U, (uint64_t)1799757332780663U, (uint64_t)751343129396941U,
-    (uint64_t)1468672898746144U, (uint64_t)1451689964451386U, (uint64_t)755070293921171U,
-    (uint64_t)904857405877052U, (uint64_t)1276087530766984U, (uint64_t)403986562858511U,
-    (uint64_t)1530661255035337U, (uint64_t)1644972908910502U, (uint64_t)1370170080438957U,
-    (uint64_t)139839536695744U, (uint64_t)909930462436512U, (uint64_t)1899999215356933U,
-    (uint64_t)635992381064566U, (uint64_t)788740975837654U, (uint64_t)224241231493695U,
-    (uint64_t)1267090030199302U, (uint64_t)998908061660139U, (uint64_t)1784537499699278U,
-    (uint64_t)859195370018706U, (uint64_t)1953966091439379U, (uint64_t)2189271820076010U,
-    (uint64_t)2039067059943978U, (uint64_t)1526694380855202U, (uint64_t)2040321513194941U,
-    (uint64_t)329922071218689U, (uint64_t)1953032256401326U, (uint64_t)989631424403521U,
-    (uint64_t)328825014934242U, (uint64_t)9407151397696U, (uint64_t)63551373671268U,
-    (uint64_t)1624728632895792U, (uint64_t)1608324920739262U, (uint64_t)1178239350351945U,
-    (uint64_t)1198077399579702U, (uint64_t)277620088676229U, (uint64_t)1775359437312528U,
-    (uint64_t)1653558177737477U, (uint64_t)1652066043408850U, (uint64_t)1063359889686622U,
-    (uint64_t)1975063804860653U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1738742601995546ULL, 1146398526822698ULL, 2070867633025821ULL,
+    562264141797630ULL, 587772402128613ULL, 1801439850948184ULL, 1351079888211148ULL,
+    450359962737049ULL, 900719925474099ULL, 1801439850948198ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    1841354044333475ULL, 16398895984059ULL, 755974180946558ULL, 900171276175154ULL,
+    1821297809914039ULL, 1661154287933054ULL, 284530020860578ULL, 1390261174866914ULL,
+    1524110943907984ULL, 1045603498418422ULL, 928651508580478ULL, 1383326941296346ULL,
+    961937908925785ULL, 80455759693706ULL, 904734540352947ULL, 1507481815385608ULL,
+    2223447444246085ULL, 1083941587175919ULL, 2059929906842505ULL, 1581435440146976ULL,
+    782730187692425ULL, 9928394897574ULL, 1539449519985236ULL, 1923587931078510ULL,
+    552919286076056ULL, 376925408065760ULL, 447320488831784ULL, 1362918338468019ULL,
+    1470031896696846ULL, 2189796996539902ULL, 1337552949959847ULL, 1762287177775726ULL,
+    237994495816815ULL, 1277840395970544ULL, 543972849007241ULL, 1224692671618814ULL,
+    162359533289271ULL, 282240927125249ULL, 586909166382289ULL, 17726488197838ULL,
+    377014554985659ULL, 1433835303052512ULL, 702061469493692ULL, 1142253108318154ULL,
+    318297794307551ULL, 954362646308543ULL, 517363881452320ULL, 1868013482130416ULL,
+    262562472373260ULL, 902232853249919ULL, 2107343057055746ULL, 462368348619024ULL,
+    1893758677092974ULL, 2177729767846389ULL, 2168532543559143ULL, 443867094639821ULL,
+    730169342581022ULL, 1564589016879755ULL, 51218195700649ULL, 76684578423745ULL,
+    560266272480743ULL, 922517457707697ULL, 2066645939860874ULL, 1318277348414638ULL,
+    1576726809084003ULL, 1817337608563665ULL, 1874240939237666ULL, 754733726333910ULL,
+    97085310406474ULL, 751148364309235ULL, 1622159695715187ULL, 1444098819684916ULL,
+    130920805558089ULL, 1260449179085308ULL, 1860021740768461ULL, 110052860348509ULL,
+    193830891643810ULL, 164148413933881ULL, 180017794795332ULL, 1523506525254651ULL,
+    465981629225956ULL, 559733514964572ULL, 1279624874416974ULL, 2026642326892306ULL,
+    1425156829982409ULL, 2160936383793147ULL, 1061870624975247ULL, 2023497043036941ULL,
+    117942212883190ULL, 490339622800774ULL, 1729931303146295ULL, 422305932971074ULL,
+    529103152793096ULL, 1211973233775992ULL, 721364955929681ULL, 1497674430438813ULL,
+    342545521275073ULL, 2102107575279372ULL, 2108462244669966ULL, 1382582406064082ULL,
+    2206396818383323ULL, 2109093268641147ULL, 10809845110983ULL, 1605176920880099ULL,
+    744640650753946ULL, 1712758897518129ULL, 373410811281809ULL, 648838265800209ULL,
+    813058095530999ULL, 513987632620169ULL, 465516160703329ULL, 2136322186126330ULL,
+    1979645899422932ULL, 1197131006470786ULL, 1467836664863979ULL, 1340751381374628ULL,
+    1810066212667962ULL, 1009933588225499ULL, 1106129188080873ULL, 1388980405213901ULL,
+    533719246598044ULL, 1169435803073277ULL, 198920999285821ULL, 487492330629854ULL,
+    1807093008537778ULL, 1540899012923865ULL, 2075080271659867ULL, 1527990806921523ULL,
+    1323728742908002ULL, 1568595959608205ULL, 1388032187497212ULL, 2026968840050568ULL,
+    1396591153295755ULL, 820416950170901ULL, 520060313205582ULL, 2016404325094901ULL,
+    1584709677868520ULL, 272161374469956ULL, 1567188603996816ULL, 1986160530078221ULL,
+    553930264324589ULL, 1058426729027503ULL, 8762762886675ULL, 2216098143382988ULL,
+    1835145266889223ULL, 1712936431558441ULL, 1017009937844974ULL, 585361667812740ULL,
+    2114711541628181ULL, 2238729632971439ULL, 121257546253072ULL, 847154149018345ULL,
+    211972965476684ULL, 287499084460129ULL, 2098247259180197ULL, 839070411583329ULL,
+    339551619574372ULL, 1432951287640743ULL, 526481249498942ULL, 931991661905195ULL,
+    1884279965674487ULL, 200486405604411ULL, 364173020594788ULL, 518034455936955ULL,
+    1085564703965501ULL, 16030410467927ULL, 604865933167613ULL, 1695298441093964ULL,
+    498856548116159ULL, 2193030062787034ULL, 1706339802964179ULL, 1721199073493888ULL,
+    820740951039755ULL, 1216053436896834ULL, 23954895815139ULL, 1662515208920491ULL,
+    1705443427511899ULL, 1957928899570365ULL, 1189636258255725ULL, 1795695471103809ULL,
+    1691191297654118ULL, 282402585374360ULL, 460405330264832ULL, 63765529445733ULL,
+    469763447404473ULL, 733607089694996ULL, 685410420186959ULL, 1096682630419738ULL,
+    1162548510542362ULL, 1020949526456676ULL, 1211660396870573ULL, 613126398222696ULL,
+    1117829165843251ULL, 742432540886650ULL, 1483755088010658ULL, 942392007134474ULL,
+    1447834130944107ULL, 489368274863410ULL, 23192985544898ULL, 648442406146160ULL,
+    785438843373876ULL, 249464684645238ULL, 170494608205618ULL, 335112827260550ULL,
+    1462050123162735ULL, 1084803668439016ULL, 853459233600325ULL, 215777728187495ULL,
+    1965759433526974ULL, 1349482894446537ULL, 694163317612871ULL, 860536766165036ULL,
+    1178788094084321ULL, 1652739626626996ULL, 2115723946388185ULL, 1577204379094664ULL,
+    1083882859023240ULL, 1768759143381635ULL, 1737180992507258ULL, 246054513922239ULL,
+    577253134087234ULL, 356340280578042ULL, 1638917769925142ULL, 223550348130103ULL,
+    470592666638765ULL, 22663573966996ULL, 596552461152400ULL, 364143537069499ULL, 3942119457699ULL,
+    107951982889287ULL, 1843471406713209ULL, 1625773041610986ULL, 1466141092501702ULL,
+    1043024095021271ULL, 310429964047508ULL, 98559121500372ULL, 152746933782868ULL,
+    259407205078261ULL, 828123093322585ULL, 1576847274280091ULL, 1170871375757302ULL,
+    1588856194642775ULL, 984767822341977ULL, 1141497997993760ULL, 809325345150796ULL,
+    1879837728202511ULL, 201340910657893ULL, 1079157558888483ULL, 1052373448588065ULL,
+    1732036202501778ULL, 2105292670328445ULL, 679751387312402ULL, 1679682144926229ULL,
+    1695823455818780ULL, 498852317075849ULL, 1786555067788433ULL, 1670727545779425ULL,
+    117945875433544ULL, 407939139781844ULL, 854632120023778ULL, 1413383148360437ULL,
+    286030901733673ULL, 1207361858071196ULL, 461340408181417ULL, 1096919590360164ULL,
+    1837594897475685ULL, 533755561544165ULL, 1638688042247712ULL, 1431653684793005ULL,
+    1036458538873559ULL, 390822120341779ULL, 1920929837111618ULL, 543426740024168ULL,
+    645751357799929ULL, 2245025632994463ULL, 1550778638076452ULL, 223738153459949ULL,
+    1337209385492033ULL, 1276967236456531ULL, 1463815821063071ULL, 2070620870191473ULL,
+    1199170709413753ULL, 273230877394166ULL, 1873264887608046ULL, 890877152910775ULL,
+    983226445635730ULL, 44873798519521ULL, 697147127512130ULL, 961631038239304ULL,
+    709966160696826ULL, 1706677689540366ULL, 502782733796035ULL, 812545535346033ULL,
+    1693622521296452ULL, 1955813093002510ULL, 1259937612881362ULL, 1873032503803559ULL,
+    1140330566016428ULL, 1675726082440190ULL, 60029928909786ULL, 170335608866763ULL,
+    766444312315022ULL, 2025049511434113ULL, 2200845622430647ULL, 1201269851450408ULL,
+    590071752404907ULL, 1400995030286946ULL, 2152637413853822ULL, 2108495473841983ULL,
+    3855406710349ULL, 1726137673168580ULL, 51004317200100ULL, 1749082328586939ULL,
+    1704088976144558ULL, 1977318954775118ULL, 2062602253162400ULL, 948062503217479ULL,
+    361953965048030ULL, 1528264887238440ULL, 62582552172290ULL, 2241602163389280ULL,
+    156385388121765ULL, 2124100319761492ULL, 388928050571382ULL, 1556123596922727ULL,
+    979310669812384ULL, 113043855206104ULL, 2023223924825469ULL, 643651703263034ULL,
+    2234446903655540ULL, 1577241261424997ULL, 860253174523845ULL, 1691026473082448ULL,
+    1091672764933872ULL, 1957463109756365ULL, 530699502660193ULL, 349587141723569ULL,
+    674661681919563ULL, 1633727303856240ULL, 708909037922144ULL, 2160722508518119ULL,
+    1302188051602540ULL, 976114603845777ULL, 120004758721939ULL, 1681630708873780ULL,
+    622274095069244ULL, 1822346309016698ULL, 1100921177951904ULL, 2216952659181677ULL,
+    1844020550362490ULL, 1976451368365774ULL, 1321101422068822ULL, 1189859436282668ULL,
+    2008801879735257ULL, 2219413454333565ULL, 424288774231098ULL, 359793146977912ULL,
+    270293357948703ULL, 587226003677000ULL, 1482071926139945ULL, 1419630774650359ULL,
+    1104739070570175ULL, 1662129023224130ULL, 1609203612533411ULL, 1250932720691980ULL,
+    95215711818495ULL, 498746909028150ULL, 158151296991874ULL, 1201379988527734ULL,
+    561599945143989ULL, 2211577425617888ULL, 2166577612206324ULL, 1057590354233512ULL,
+    1968123280416769ULL, 1316586165401313ULL, 762728164447634ULL, 2045395244316047ULL,
+    1531796898725716ULL, 315385971670425ULL, 1109421039396756ULL, 2183635256408562ULL,
+    1896751252659461ULL, 840236037179080ULL, 796245792277211ULL, 508345890111193ULL,
+    1275386465287222ULL, 513560822858784ULL, 1784735733120313ULL, 1346467478899695ULL,
+    601125231208417ULL, 701076661112726ULL, 1841998436455089ULL, 1156768600940434ULL,
+    1967853462343221ULL, 2178318463061452ULL, 481885520752741ULL, 675262828640945ULL,
+    1033539418596582ULL, 1743329872635846ULL, 159322641251283ULL, 1573076470127113ULL,
+    954827619308195ULL, 778834750662635ULL, 619912782122617ULL, 515681498488209ULL,
+    1675866144246843ULL, 811716020969981ULL, 1125515272217398ULL, 1398917918287342ULL,
+    1301680949183175ULL, 726474739583734ULL, 587246193475200ULL, 1096581582611864ULL,
+    1469911826213486ULL, 1990099711206364ULL, 1256496099816508ULL, 2019924615195672ULL,
+    1251232456707555ULL, 2042971196009755ULL, 214061878479265ULL, 115385726395472ULL,
+    1677875239524132ULL, 756888883383540ULL, 1153862117756233ULL, 503391530851096ULL,
+    946070017477513ULL, 1878319040542579ULL, 1101349418586920ULL, 793245696431613ULL,
+    397920495357645ULL, 2174023872951112ULL, 1517867915189593ULL, 1829855041462995ULL,
+    1046709983503619ULL, 424081940711857ULL, 2112438073094647ULL, 1504338467349861ULL,
+    2244574127374532ULL, 2136937537441911ULL, 1741150838990304ULL, 25894628400571ULL,
+    512213526781178ULL, 1168384260796379ULL, 1424607682379833ULL, 938677789731564ULL,
+    872882241891896ULL, 1713199397007700ULL, 1410496326218359ULL, 854379752407031ULL,
+    465141611727634ULL, 315176937037857ULL, 1020115054571233ULL, 1856290111077229ULL,
+    2028366269898204ULL, 1432980880307543ULL, 469932710425448ULL, 581165267592247ULL,
+    496399148156603ULL, 2063435226705903ULL, 2116841086237705ULL, 498272567217048ULL,
+    1829438076967906ULL, 1573925801278491ULL, 460763576329867ULL, 1705264723728225ULL,
+    999514866082412ULL, 29635061779362ULL, 1884233592281020ULL, 1449755591461338ULL,
+    42579292783222ULL, 1869504355369200ULL, 495506004805251ULL, 264073104888427ULL,
+    2088880861028612ULL, 104646456386576ULL, 1258445191399967ULL, 1348736801545799ULL,
+    2068276361286613ULL, 884897216646374ULL, 922387476801376ULL, 1043886580402805ULL,
+    1240883498470831ULL, 1601554651937110ULL, 804382935289482ULL, 512379564477239ULL,
+    1466384519077032ULL, 1280698500238386ULL, 211303836685749ULL, 2081725624793803ULL,
+    545247644516879ULL, 215313359330384ULL, 286479751145614ULL, 2213650281751636ULL,
+    2164927945999874ULL, 2072162991540882ULL, 1443769115444779ULL, 1581473274363095ULL,
+    434633875922699ULL, 340456055781599ULL, 373043091080189ULL, 839476566531776ULL,
+    1856706858509978ULL, 931616224909153ULL, 1888181317414065ULL, 213654322650262ULL,
+    1161078103416244ULL, 1822042328851513ULL, 915817709028812ULL, 1828297056698188ULL,
+    1212017130909403ULL, 60258343247333ULL, 342085800008230ULL, 930240559508270ULL,
+    1549884999174952ULL, 809895264249462ULL, 184726257947682ULL, 1157065433504828ULL,
+    1209999630381477ULL, 999920399374391ULL, 1714770150788163ULL, 2026130985413228ULL,
+    506776632883140ULL, 1349042668246528ULL, 1937232292976967ULL, 942302637530730ULL,
+    160211904766226ULL, 1042724500438571ULL, 212454865139142ULL, 244104425172642ULL,
+    1376990622387496ULL, 76126752421227ULL, 1027540886376422ULL, 1912210655133026ULL,
+    13410411589575ULL, 1475856708587773ULL, 615563352691682ULL, 1446629324872644ULL,
+    1683670301784014ULL, 1049873327197127ULL, 1826401704084838ULL, 2032577048760775ULL,
+    1922203607878853ULL, 836708788764806ULL, 2193084654695012ULL, 1342923183256659ULL,
+    849356986294271ULL, 1228863973965618ULL, 94886161081867ULL, 1423288430204892ULL,
+    2016167528707016ULL, 1633187660972877ULL, 1550621242301752ULL, 340630244512994ULL,
+    2103577710806901ULL, 221625016538931ULL, 421544147350960ULL, 580428704555156ULL,
+    1479831381265617ULL, 518057926544698ULL, 955027348790630ULL, 1326749172561598ULL,
+    1118304625755967ULL, 1994005916095176ULL, 1799757332780663ULL, 751343129396941ULL,
+    1468672898746144ULL, 1451689964451386ULL, 755070293921171ULL, 904857405877052ULL,
+    1276087530766984ULL, 403986562858511ULL, 1530661255035337ULL, 1644972908910502ULL,
+    1370170080438957ULL, 139839536695744ULL, 909930462436512ULL, 1899999215356933ULL,
+    635992381064566ULL, 788740975837654ULL, 224241231493695ULL, 1267090030199302ULL,
+    998908061660139ULL, 1784537499699278ULL, 859195370018706ULL, 1953966091439379ULL,
+    2189271820076010ULL, 2039067059943978ULL, 1526694380855202ULL, 2040321513194941ULL,
+    329922071218689ULL, 1953032256401326ULL, 989631424403521ULL, 328825014934242ULL,
+    9407151397696ULL, 63551373671268ULL, 1624728632895792ULL, 1608324920739262ULL,
+    1178239350351945ULL, 1198077399579702ULL, 277620088676229ULL, 1775359437312528ULL,
+    1653558177737477ULL, 1652066043408850ULL, 1063359889686622ULL, 1975063804860653ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/internal/Hacl_Frodo_KEM.h b/include/internal/Hacl_Frodo_KEM.h
index 5d8f2a85..a4e2f62a 100644
--- a/include/internal/Hacl_Frodo_KEM.h
+++ b/include/internal/Hacl_Frodo_KEM.h
@@ -55,22 +55,22 @@ Hacl_Keccak_shake128_4x(
   uint8_t *output3
 )
 {
-  Hacl_SHA3_shake128_hacl(input_len, input0, output_len, output0);
-  Hacl_SHA3_shake128_hacl(input_len, input1, output_len, output1);
-  Hacl_SHA3_shake128_hacl(input_len, input2, output_len, output2);
-  Hacl_SHA3_shake128_hacl(input_len, input3, output_len, output3);
+  Hacl_Hash_SHA3_shake128_hacl(input_len, input0, output_len, output0);
+  Hacl_Hash_SHA3_shake128_hacl(input_len, input1, output_len, output1);
+  Hacl_Hash_SHA3_shake128_hacl(input_len, input2, output_len, output2);
+  Hacl_Hash_SHA3_shake128_hacl(input_len, input3, output_len, output3);
 }
 
 static inline void
 Hacl_Impl_Matrix_mod_pow2(uint32_t n1, uint32_t n2, uint32_t logq, uint16_t *a)
 {
-  if (logq < (uint32_t)16U)
+  if (logq < 16U)
   {
-    for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+    for (uint32_t i0 = 0U; i0 < n1; i0++)
     {
-      for (uint32_t i = (uint32_t)0U; i < n2; i++)
+      for (uint32_t i = 0U; i < n2; i++)
       {
-        a[i0 * n2 + i] = a[i0 * n2 + i] & (((uint16_t)1U << logq) - (uint16_t)1U);
+        a[i0 * n2 + i] = (uint32_t)a[i0 * n2 + i] & ((1U << logq) - 1U);
       }
     }
     return;
@@ -80,11 +80,11 @@ Hacl_Impl_Matrix_mod_pow2(uint32_t n1, uint32_t n2, uint32_t logq, uint16_t *a)
 static inline void
 Hacl_Impl_Matrix_matrix_add(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i = (uint32_t)0U; i < n2; i++)
+    for (uint32_t i = 0U; i < n2; i++)
     {
-      a[i0 * n2 + i] = a[i0 * n2 + i] + b[i0 * n2 + i];
+      a[i0 * n2 + i] = (uint32_t)a[i0 * n2 + i] + (uint32_t)b[i0 * n2 + i];
     }
   }
 }
@@ -92,11 +92,11 @@ Hacl_Impl_Matrix_matrix_add(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 static inline void
 Hacl_Impl_Matrix_matrix_sub(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i = (uint32_t)0U; i < n2; i++)
+    for (uint32_t i = 0U; i < n2; i++)
     {
-      b[i0 * n2 + i] = a[i0 * n2 + i] - b[i0 * n2 + i];
+      b[i0 * n2 + i] = (uint32_t)a[i0 * n2 + i] - (uint32_t)b[i0 * n2 + i];
     }
   }
 }
@@ -111,17 +111,17 @@ Hacl_Impl_Matrix_matrix_mul(
   uint16_t *c
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n3; i1++)
+    for (uint32_t i1 = 0U; i1 < n3; i1++)
     {
-      uint16_t res = (uint16_t)0U;
-      for (uint32_t i = (uint32_t)0U; i < n2; i++)
+      uint16_t res = 0U;
+      for (uint32_t i = 0U; i < n2; i++)
       {
         uint16_t aij = a[i0 * n2 + i];
         uint16_t bjk = b[i * n3 + i1];
         uint16_t res0 = res;
-        res = res0 + aij * bjk;
+        res = (uint32_t)res0 + (uint32_t)aij * (uint32_t)bjk;
       }
       c[i0 * n3 + i1] = res;
     }
@@ -138,17 +138,17 @@ Hacl_Impl_Matrix_matrix_mul_s(
   uint16_t *c
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n3; i1++)
+    for (uint32_t i1 = 0U; i1 < n3; i1++)
     {
-      uint16_t res = (uint16_t)0U;
-      for (uint32_t i = (uint32_t)0U; i < n2; i++)
+      uint16_t res = 0U;
+      for (uint32_t i = 0U; i < n2; i++)
       {
         uint16_t aij = a[i0 * n2 + i];
         uint16_t bjk = b[i1 * n2 + i];
         uint16_t res0 = res;
-        res = res0 + aij * bjk;
+        res = (uint32_t)res0 + (uint32_t)aij * (uint32_t)bjk;
       }
       c[i0 * n3 + i1] = res;
     }
@@ -158,11 +158,11 @@ Hacl_Impl_Matrix_matrix_mul_s(
 static inline uint16_t
 Hacl_Impl_Matrix_matrix_eq(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 {
-  uint16_t res = (uint16_t)0xFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < n1 * n2; i++)
+  uint16_t res = 0xFFFFU;
+  for (uint32_t i = 0U; i < n1 * n2; i++)
   {
     uint16_t uu____0 = FStar_UInt16_eq_mask(a[i], b[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint16_t r = res;
   return r;
@@ -171,19 +171,19 @@ Hacl_Impl_Matrix_matrix_eq(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 static inline void
 Hacl_Impl_Matrix_matrix_to_lbytes(uint32_t n1, uint32_t n2, uint16_t *m, uint8_t *res)
 {
-  for (uint32_t i = (uint32_t)0U; i < n1 * n2; i++)
+  for (uint32_t i = 0U; i < n1 * n2; i++)
   {
-    store16_le(res + (uint32_t)2U * i, m[i]);
+    store16_le(res + 2U * i, m[i]);
   }
 }
 
 static inline void
 Hacl_Impl_Matrix_matrix_from_lbytes(uint32_t n1, uint32_t n2, uint8_t *b, uint16_t *res)
 {
-  for (uint32_t i = (uint32_t)0U; i < n1 * n2; i++)
+  for (uint32_t i = 0U; i < n1 * n2; i++)
   {
     uint16_t *os = res;
-    uint16_t u = load16_le(b + (uint32_t)2U * i);
+    uint16_t u = load16_le(b + 2U * i);
     uint16_t x = u;
     os[i] = x;
   }
@@ -192,53 +192,53 @@ Hacl_Impl_Matrix_matrix_from_lbytes(uint32_t n1, uint32_t n2, uint8_t *b, uint16
 static inline void
 Hacl_Impl_Frodo_Gen_frodo_gen_matrix_shake_4x(uint32_t n, uint8_t *seed, uint16_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint8_t), (uint32_t)8U * n);
-  uint8_t r[(uint32_t)8U * n];
-  memset(r, 0U, (uint32_t)8U * n * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), 8U * n);
+  uint8_t r[8U * n];
+  memset(r, 0U, 8U * n * sizeof (uint8_t));
   uint8_t tmp_seed[72U] = { 0U };
-  memcpy(tmp_seed + (uint32_t)2U, seed, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(tmp_seed + (uint32_t)20U, seed, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(tmp_seed + (uint32_t)38U, seed, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(tmp_seed + (uint32_t)56U, seed, (uint32_t)16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 2U, seed, 16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 20U, seed, 16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 38U, seed, 16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 56U, seed, 16U * sizeof (uint8_t));
   memset(res, 0U, n * n * sizeof (uint16_t));
-  for (uint32_t i = (uint32_t)0U; i < n / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < n / 4U; i++)
   {
-    uint8_t *r0 = r + (uint32_t)0U * n;
-    uint8_t *r1 = r + (uint32_t)2U * n;
-    uint8_t *r2 = r + (uint32_t)4U * n;
-    uint8_t *r3 = r + (uint32_t)6U * n;
+    uint8_t *r0 = r + 0U * n;
+    uint8_t *r1 = r + 2U * n;
+    uint8_t *r2 = r + 4U * n;
+    uint8_t *r3 = r + 6U * n;
     uint8_t *tmp_seed0 = tmp_seed;
-    uint8_t *tmp_seed1 = tmp_seed + (uint32_t)18U;
-    uint8_t *tmp_seed2 = tmp_seed + (uint32_t)36U;
-    uint8_t *tmp_seed3 = tmp_seed + (uint32_t)54U;
-    store16_le(tmp_seed0, (uint16_t)((uint32_t)4U * i + (uint32_t)0U));
-    store16_le(tmp_seed1, (uint16_t)((uint32_t)4U * i + (uint32_t)1U));
-    store16_le(tmp_seed2, (uint16_t)((uint32_t)4U * i + (uint32_t)2U));
-    store16_le(tmp_seed3, (uint16_t)((uint32_t)4U * i + (uint32_t)3U));
-    Hacl_Keccak_shake128_4x((uint32_t)18U,
+    uint8_t *tmp_seed1 = tmp_seed + 18U;
+    uint8_t *tmp_seed2 = tmp_seed + 36U;
+    uint8_t *tmp_seed3 = tmp_seed + 54U;
+    store16_le(tmp_seed0, (uint16_t)(4U * i + 0U));
+    store16_le(tmp_seed1, (uint16_t)(4U * i + 1U));
+    store16_le(tmp_seed2, (uint16_t)(4U * i + 2U));
+    store16_le(tmp_seed3, (uint16_t)(4U * i + 3U));
+    Hacl_Keccak_shake128_4x(18U,
       tmp_seed0,
       tmp_seed1,
       tmp_seed2,
       tmp_seed3,
-      (uint32_t)2U * n,
+      2U * n,
       r0,
       r1,
       r2,
       r3);
-    for (uint32_t i0 = (uint32_t)0U; i0 < n; i0++)
+    for (uint32_t i0 = 0U; i0 < n; i0++)
     {
-      uint8_t *resij0 = r0 + i0 * (uint32_t)2U;
-      uint8_t *resij1 = r1 + i0 * (uint32_t)2U;
-      uint8_t *resij2 = r2 + i0 * (uint32_t)2U;
-      uint8_t *resij3 = r3 + i0 * (uint32_t)2U;
+      uint8_t *resij0 = r0 + i0 * 2U;
+      uint8_t *resij1 = r1 + i0 * 2U;
+      uint8_t *resij2 = r2 + i0 * 2U;
+      uint8_t *resij3 = r3 + i0 * 2U;
       uint16_t u = load16_le(resij0);
-      res[((uint32_t)4U * i + (uint32_t)0U) * n + i0] = u;
+      res[(4U * i + 0U) * n + i0] = u;
       uint16_t u0 = load16_le(resij1);
-      res[((uint32_t)4U * i + (uint32_t)1U) * n + i0] = u0;
+      res[(4U * i + 1U) * n + i0] = u0;
       uint16_t u1 = load16_le(resij2);
-      res[((uint32_t)4U * i + (uint32_t)2U) * n + i0] = u1;
+      res[(4U * i + 2U) * n + i0] = u1;
       uint16_t u2 = load16_le(resij3);
-      res[((uint32_t)4U * i + (uint32_t)3U) * n + i0] = u2;
+      res[(4U * i + 3U) * n + i0] = u2;
     }
   }
 }
@@ -270,27 +270,19 @@ static const
 uint16_t
 Hacl_Impl_Frodo_Params_cdf_table640[13U] =
   {
-    (uint16_t)4643U, (uint16_t)13363U, (uint16_t)20579U, (uint16_t)25843U, (uint16_t)29227U,
-    (uint16_t)31145U, (uint16_t)32103U, (uint16_t)32525U, (uint16_t)32689U, (uint16_t)32745U,
-    (uint16_t)32762U, (uint16_t)32766U, (uint16_t)32767U
+    4643U, 13363U, 20579U, 25843U, 29227U, 31145U, 32103U, 32525U, 32689U, 32745U, 32762U, 32766U,
+    32767U
   };
 
 static const
 uint16_t
 Hacl_Impl_Frodo_Params_cdf_table976[11U] =
-  {
-    (uint16_t)5638U, (uint16_t)15915U, (uint16_t)23689U, (uint16_t)28571U, (uint16_t)31116U,
-    (uint16_t)32217U, (uint16_t)32613U, (uint16_t)32731U, (uint16_t)32760U, (uint16_t)32766U,
-    (uint16_t)32767U
-  };
+  { 5638U, 15915U, 23689U, 28571U, 31116U, 32217U, 32613U, 32731U, 32760U, 32766U, 32767U };
 
 static const
 uint16_t
 Hacl_Impl_Frodo_Params_cdf_table1344[7U] =
-  {
-    (uint16_t)9142U, (uint16_t)23462U, (uint16_t)30338U, (uint16_t)32361U, (uint16_t)32725U,
-    (uint16_t)32765U, (uint16_t)32767U
-  };
+  { 9142U, 23462U, 30338U, 32361U, 32725U, 32765U, 32767U };
 
 static inline void
 Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(
@@ -301,26 +293,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)12U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 12U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table640[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -334,26 +326,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)12U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 12U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table640[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -367,26 +359,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)10U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 10U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table976[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -400,26 +392,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)6U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 6U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table1344[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -435,39 +427,34 @@ Hacl_Impl_Frodo_Pack_frodo_pack(
   uint8_t *res
 )
 {
-  uint32_t n = n1 * n2 / (uint32_t)8U;
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  uint32_t n = n1 * n2 / 8U;
+  for (uint32_t i = 0U; i < n; i++)
   {
-    uint16_t *a1 = a + (uint32_t)8U * i;
+    uint16_t *a1 = a + 8U * i;
     uint8_t *r = res + d * i;
-    uint16_t maskd = (uint16_t)((uint32_t)1U << d) - (uint16_t)1U;
+    uint16_t maskd = (uint32_t)(uint16_t)(1U << d) - 1U;
     uint8_t v16[16U] = { 0U };
-    uint16_t a0 = a1[0U] & maskd;
-    uint16_t a11 = a1[1U] & maskd;
-    uint16_t a2 = a1[2U] & maskd;
-    uint16_t a3 = a1[3U] & maskd;
-    uint16_t a4 = a1[4U] & maskd;
-    uint16_t a5 = a1[5U] & maskd;
-    uint16_t a6 = a1[6U] & maskd;
-    uint16_t a7 = a1[7U] & maskd;
+    uint16_t a0 = (uint32_t)a1[0U] & (uint32_t)maskd;
+    uint16_t a11 = (uint32_t)a1[1U] & (uint32_t)maskd;
+    uint16_t a2 = (uint32_t)a1[2U] & (uint32_t)maskd;
+    uint16_t a3 = (uint32_t)a1[3U] & (uint32_t)maskd;
+    uint16_t a4 = (uint32_t)a1[4U] & (uint32_t)maskd;
+    uint16_t a5 = (uint32_t)a1[5U] & (uint32_t)maskd;
+    uint16_t a6 = (uint32_t)a1[6U] & (uint32_t)maskd;
+    uint16_t a7 = (uint32_t)a1[7U] & (uint32_t)maskd;
     FStar_UInt128_uint128
     templong =
       FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a0),
-                      (uint32_t)7U * d),
-                    FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a11),
-                      (uint32_t)6U * d)),
-                  FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a2),
-                    (uint32_t)5U * d)),
-                FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a3),
-                  (uint32_t)4U * d)),
-              FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a4),
-                (uint32_t)3U * d)),
-            FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a5),
-              (uint32_t)2U * d)),
-          FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a6), (uint32_t)1U * d)),
-        FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a7), (uint32_t)0U * d));
+                      7U * d),
+                    FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a11), 6U * d)),
+                  FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a2), 5U * d)),
+                FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a3), 4U * d)),
+              FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a4), 3U * d)),
+            FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a5), 2U * d)),
+          FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a6), 1U * d)),
+        FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a7), 0U * d));
     store128_be(v16, templong);
-    uint8_t *src = v16 + (uint32_t)16U - d;
+    uint8_t *src = v16 + 16U - d;
     memcpy(r, src, d * sizeof (uint8_t));
   }
 }
@@ -481,48 +468,48 @@ Hacl_Impl_Frodo_Pack_frodo_unpack(
   uint16_t *res
 )
 {
-  uint32_t n = n1 * n2 / (uint32_t)8U;
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  uint32_t n = n1 * n2 / 8U;
+  for (uint32_t i = 0U; i < n; i++)
   {
     uint8_t *b1 = b + d * i;
-    uint16_t *r = res + (uint32_t)8U * i;
-    uint16_t maskd = (uint16_t)((uint32_t)1U << d) - (uint16_t)1U;
+    uint16_t *r = res + 8U * i;
+    uint16_t maskd = (uint32_t)(uint16_t)(1U << d) - 1U;
     uint8_t src[16U] = { 0U };
-    memcpy(src + (uint32_t)16U - d, b1, d * sizeof (uint8_t));
+    memcpy(src + 16U - d, b1, d * sizeof (uint8_t));
     FStar_UInt128_uint128 u = load128_be(src);
     FStar_UInt128_uint128 templong = u;
     r[0U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)7U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          7U * d))
+      & (uint32_t)maskd;
     r[1U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)6U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          6U * d))
+      & (uint32_t)maskd;
     r[2U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)5U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          5U * d))
+      & (uint32_t)maskd;
     r[3U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)4U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          4U * d))
+      & (uint32_t)maskd;
     r[4U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)3U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          3U * d))
+      & (uint32_t)maskd;
     r[5U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)2U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          2U * d))
+      & (uint32_t)maskd;
     r[6U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)1U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          1U * d))
+      & (uint32_t)maskd;
     r[7U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)0U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          0U * d))
+      & (uint32_t)maskd;
   }
 }
 
@@ -535,7 +522,7 @@ Hacl_Impl_Frodo_Encode_frodo_key_encode(
   uint16_t *res
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n; i0++)
+  for (uint32_t i0 = 0U; i0 < n; i0++)
   {
     uint8_t v8[8U] = { 0U };
     uint8_t *chunk = a + i0 * b;
@@ -544,11 +531,11 @@ Hacl_Impl_Frodo_Encode_frodo_key_encode(
     uint64_t x = u;
     uint64_t x0 = x;
     KRML_MAYBE_FOR8(i,
-      (uint32_t)0U,
-      (uint32_t)8U,
-      (uint32_t)1U,
-      uint64_t rk = x0 >> b * i & (((uint64_t)1U << b) - (uint64_t)1U);
-      res[i0 * n + i] = (uint16_t)rk << (logq - b););
+      0U,
+      8U,
+      1U,
+      uint64_t rk = x0 >> b * i & ((1ULL << b) - 1ULL);
+      res[i0 * n + i] = (uint32_t)(uint16_t)rk << (logq - b););
   }
 }
 
@@ -561,16 +548,16 @@ Hacl_Impl_Frodo_Encode_frodo_key_decode(
   uint8_t *res
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n; i0++)
+  for (uint32_t i0 = 0U; i0 < n; i0++)
   {
-    uint64_t templong = (uint64_t)0U;
+    uint64_t templong = 0ULL;
     KRML_MAYBE_FOR8(i,
-      (uint32_t)0U,
-      (uint32_t)8U,
-      (uint32_t)1U,
+      0U,
+      8U,
+      1U,
       uint16_t aik = a[i0 * n + i];
-      uint16_t res1 = (aik + ((uint16_t)1U << (logq - b - (uint32_t)1U))) >> (logq - b);
-      templong = templong | (uint64_t)(res1 & (((uint16_t)1U << b) - (uint16_t)1U)) << b * i;);
+      uint16_t res1 = (((uint32_t)aik + (1U << (logq - b - 1U))) & 0xFFFFU) >> (logq - b);
+      templong = templong | (uint64_t)((uint32_t)res1 & ((1U << b) - 1U)) << b * i;);
     uint64_t templong0 = templong;
     uint8_t v8[8U] = { 0U };
     store64_le(v8, templong0);
diff --git a/include/msvc/internal/Hacl_Hash_Blake2.h b/include/internal/Hacl_HMAC.h
similarity index 82%
rename from include/msvc/internal/Hacl_Hash_Blake2.h
rename to include/internal/Hacl_HMAC.h
index 8f308bd9..ad344c4c 100644
--- a/include/msvc/internal/Hacl_Hash_Blake2.h
+++ b/include/internal/Hacl_HMAC.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __internal_Hacl_Hash_Blake2_H
-#define __internal_Hacl_Hash_Blake2_H
+#ifndef __internal_Hacl_HMAC_H
+#define __internal_Hacl_HMAC_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,8 +35,12 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "internal/Hacl_Impl_Blake2_Constants.h"
-#include "../Hacl_Hash_Blake2.h"
+#include "internal/Hacl_Krmllib.h"
+#include "internal/Hacl_Hash_SHA2.h"
+#include "internal/Hacl_Hash_SHA1.h"
+#include "internal/Hacl_Hash_Blake2s.h"
+#include "internal/Hacl_Hash_Blake2b.h"
+#include "../Hacl_HMAC.h"
 
 typedef struct K___uint32_t_uint32_t_s
 {
@@ -49,5 +53,5 @@ K___uint32_t_uint32_t;
 }
 #endif
 
-#define __internal_Hacl_Hash_Blake2_H_DEFINED
+#define __internal_Hacl_HMAC_H_DEFINED
 #endif
diff --git a/include/internal/Hacl_Hash_Blake2b.h b/include/internal/Hacl_Hash_Blake2b.h
new file mode 100644
index 00000000..21689d60
--- /dev/null
+++ b/include/internal/Hacl_Hash_Blake2b.h
@@ -0,0 +1,70 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __internal_Hacl_Hash_Blake2b_H
+#define __internal_Hacl_Hash_Blake2b_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "../Hacl_Hash_Blake2b.h"
+
+void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn);
+
+void
+Hacl_Hash_Blake2b_update_multi(
+  uint32_t len,
+  uint64_t *wv,
+  uint64_t *hash,
+  FStar_UInt128_uint128 prev,
+  uint8_t *blocks,
+  uint32_t nb
+);
+
+void
+Hacl_Hash_Blake2b_update_last(
+  uint32_t len,
+  uint64_t *wv,
+  uint64_t *hash,
+  FStar_UInt128_uint128 prev,
+  uint32_t rem,
+  uint8_t *d
+);
+
+void Hacl_Hash_Blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __internal_Hacl_Hash_Blake2b_H_DEFINED
+#endif
diff --git a/include/Hacl_Hash_Blake2b_256.h b/include/internal/Hacl_Hash_Blake2b_Simd256.h
similarity index 61%
rename from include/Hacl_Hash_Blake2b_256.h
rename to include/internal/Hacl_Hash_Blake2b_Simd256.h
index 2379fd75..4cc07869 100644
--- a/include/Hacl_Hash_Blake2b_256.h
+++ b/include/internal/Hacl_Hash_Blake2b_Simd256.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Hash_Blake2b_256_H
-#define __Hacl_Hash_Blake2b_256_H
+#ifndef __internal_Hacl_Hash_Blake2b_Simd256_H
+#define __internal_Hacl_Hash_Blake2b_Simd256_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,23 +35,15 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "Hacl_Krmllib.h"
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "../Hacl_Hash_Blake2b_Simd256.h"
 #include "libintvector.h"
 
 void
-Hacl_Blake2b_256_blake2b_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, uint32_t nn);
+Hacl_Hash_Blake2b_Simd256_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, uint32_t nn);
 
 void
-Hacl_Blake2b_256_blake2b_update_key(
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-);
-
-void
-Hacl_Blake2b_256_blake2b_update_multi(
+Hacl_Hash_Blake2b_Simd256_update_multi(
   uint32_t len,
   Lib_IntVector_Intrinsics_vec256 *wv,
   Lib_IntVector_Intrinsics_vec256 *hash,
@@ -61,7 +53,7 @@ Hacl_Blake2b_256_blake2b_update_multi(
 );
 
 void
-Hacl_Blake2b_256_blake2b_update_last(
+Hacl_Hash_Blake2b_Simd256_update_last(
   uint32_t len,
   Lib_IntVector_Intrinsics_vec256 *wv,
   Lib_IntVector_Intrinsics_vec256 *hash,
@@ -71,49 +63,29 @@ Hacl_Blake2b_256_blake2b_update_last(
 );
 
 void
-Hacl_Blake2b_256_blake2b_finish(
+Hacl_Hash_Blake2b_Simd256_finish(
   uint32_t nn,
   uint8_t *output,
   Lib_IntVector_Intrinsics_vec256 *hash
 );
 
-/**
-Write the BLAKE2b digest of message `d` using key `k` into `output`.
-
-@param nn Length of the to-be-generated digest with 1 <= `nn` <= 64.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2b_256_blake2b(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-);
-
 void
-Hacl_Blake2b_256_load_state256b_from_state32(
+Hacl_Hash_Blake2b_Simd256_load_state256b_from_state32(
   Lib_IntVector_Intrinsics_vec256 *st,
   uint64_t *st32
 );
 
 void
-Hacl_Blake2b_256_store_state256b_to_state32(
+Hacl_Hash_Blake2b_Simd256_store_state256b_to_state32(
   uint64_t *st32,
   Lib_IntVector_Intrinsics_vec256 *st
 );
 
-Lib_IntVector_Intrinsics_vec256 *Hacl_Blake2b_256_blake2b_malloc(void);
+Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_Blake2b_Simd256_malloc_with_key(void);
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Hash_Blake2b_256_H_DEFINED
+#define __internal_Hacl_Hash_Blake2b_Simd256_H_DEFINED
 #endif
diff --git a/include/internal/Hacl_Hash_Blake2s.h b/include/internal/Hacl_Hash_Blake2s.h
new file mode 100644
index 00000000..f814aa95
--- /dev/null
+++ b/include/internal/Hacl_Hash_Blake2s.h
@@ -0,0 +1,70 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __internal_Hacl_Hash_Blake2s_H
+#define __internal_Hacl_Hash_Blake2s_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "../Hacl_Hash_Blake2s.h"
+
+void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn);
+
+void
+Hacl_Hash_Blake2s_update_multi(
+  uint32_t len,
+  uint32_t *wv,
+  uint32_t *hash,
+  uint64_t prev,
+  uint8_t *blocks,
+  uint32_t nb
+);
+
+void
+Hacl_Hash_Blake2s_update_last(
+  uint32_t len,
+  uint32_t *wv,
+  uint32_t *hash,
+  uint64_t prev,
+  uint32_t rem,
+  uint8_t *d
+);
+
+void Hacl_Hash_Blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __internal_Hacl_Hash_Blake2s_H_DEFINED
+#endif
diff --git a/include/msvc/Hacl_Hash_Blake2s_128.h b/include/internal/Hacl_Hash_Blake2s_Simd128.h
similarity index 61%
rename from include/msvc/Hacl_Hash_Blake2s_128.h
rename to include/internal/Hacl_Hash_Blake2s_Simd128.h
index 2af827cd..0589aec5 100644
--- a/include/msvc/Hacl_Hash_Blake2s_128.h
+++ b/include/internal/Hacl_Hash_Blake2s_Simd128.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Hash_Blake2s_128_H
-#define __Hacl_Hash_Blake2s_128_H
+#ifndef __internal_Hacl_Hash_Blake2s_Simd128_H
+#define __internal_Hacl_Hash_Blake2s_Simd128_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,22 +35,15 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "../Hacl_Hash_Blake2s_Simd128.h"
 #include "libintvector.h"
 
 void
-Hacl_Blake2s_128_blake2s_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn);
+Hacl_Hash_Blake2s_Simd128_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn);
 
 void
-Hacl_Blake2s_128_blake2s_update_key(
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-);
-
-void
-Hacl_Blake2s_128_blake2s_update_multi(
+Hacl_Hash_Blake2s_Simd128_update_multi(
   uint32_t len,
   Lib_IntVector_Intrinsics_vec128 *wv,
   Lib_IntVector_Intrinsics_vec128 *hash,
@@ -60,7 +53,7 @@ Hacl_Blake2s_128_blake2s_update_multi(
 );
 
 void
-Hacl_Blake2s_128_blake2s_update_last(
+Hacl_Hash_Blake2s_Simd128_update_last(
   uint32_t len,
   Lib_IntVector_Intrinsics_vec128 *wv,
   Lib_IntVector_Intrinsics_vec128 *hash,
@@ -70,49 +63,29 @@ Hacl_Blake2s_128_blake2s_update_last(
 );
 
 void
-Hacl_Blake2s_128_blake2s_finish(
+Hacl_Hash_Blake2s_Simd128_finish(
   uint32_t nn,
   uint8_t *output,
   Lib_IntVector_Intrinsics_vec128 *hash
 );
 
-/**
-Write the BLAKE2s digest of message `d` using key `k` into `output`.
-
-@param nn Length of to-be-generated digest with 1 <= `nn` <= 32.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2s_128_blake2s(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-);
-
 void
-Hacl_Blake2s_128_store_state128s_to_state32(
+Hacl_Hash_Blake2s_Simd128_store_state128s_to_state32(
   uint32_t *st32,
   Lib_IntVector_Intrinsics_vec128 *st
 );
 
 void
-Hacl_Blake2s_128_load_state128s_from_state32(
+Hacl_Hash_Blake2s_Simd128_load_state128s_from_state32(
   Lib_IntVector_Intrinsics_vec128 *st,
   uint32_t *st32
 );
 
-Lib_IntVector_Intrinsics_vec128 *Hacl_Blake2s_128_blake2s_malloc(void);
+Lib_IntVector_Intrinsics_vec128 *Hacl_Hash_Blake2s_Simd128_malloc_with_key(void);
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Hash_Blake2s_128_H_DEFINED
+#define __internal_Hacl_Hash_Blake2s_Simd128_H_DEFINED
 #endif
diff --git a/include/internal/Hacl_Hash_MD5.h b/include/internal/Hacl_Hash_MD5.h
index 7fd567f3..dd77aaf1 100644
--- a/include/internal/Hacl_Hash_MD5.h
+++ b/include/internal/Hacl_Hash_MD5.h
@@ -37,21 +37,16 @@ extern "C" {
 
 #include "../Hacl_Hash_MD5.h"
 
-void Hacl_Hash_Core_MD5_legacy_init(uint32_t *s);
+void Hacl_Hash_MD5_init(uint32_t *s);
 
-void Hacl_Hash_Core_MD5_legacy_finish(uint32_t *s, uint8_t *dst);
+void Hacl_Hash_MD5_finish(uint32_t *s, uint8_t *dst);
 
-void Hacl_Hash_MD5_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
+void Hacl_Hash_MD5_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
 
 void
-Hacl_Hash_MD5_legacy_update_last(
-  uint32_t *s,
-  uint64_t prev_len,
-  uint8_t *input,
-  uint32_t input_len
-);
-
-void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+Hacl_Hash_MD5_update_last(uint32_t *s, uint64_t prev_len, uint8_t *input, uint32_t input_len);
+
+void Hacl_Hash_MD5_hash_oneshot(uint8_t *output, uint8_t *input, uint32_t input_len);
 
 #if defined(__cplusplus)
 }
diff --git a/include/internal/Hacl_Hash_SHA1.h b/include/internal/Hacl_Hash_SHA1.h
index 72cf492c..ed53be55 100644
--- a/include/internal/Hacl_Hash_SHA1.h
+++ b/include/internal/Hacl_Hash_SHA1.h
@@ -37,21 +37,16 @@ extern "C" {
 
 #include "../Hacl_Hash_SHA1.h"
 
-void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s);
+void Hacl_Hash_SHA1_init(uint32_t *s);
 
-void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst);
+void Hacl_Hash_SHA1_finish(uint32_t *s, uint8_t *dst);
 
-void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
+void Hacl_Hash_SHA1_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
 
 void
-Hacl_Hash_SHA1_legacy_update_last(
-  uint32_t *s,
-  uint64_t prev_len,
-  uint8_t *input,
-  uint32_t input_len
-);
-
-void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+Hacl_Hash_SHA1_update_last(uint32_t *s, uint64_t prev_len, uint8_t *input, uint32_t input_len);
+
+void Hacl_Hash_SHA1_hash_oneshot(uint8_t *output, uint8_t *input, uint32_t input_len);
 
 #if defined(__cplusplus)
 }
diff --git a/include/internal/Hacl_Hash_SHA2.h b/include/internal/Hacl_Hash_SHA2.h
index bbffdc50..7dade3f3 100644
--- a/include/internal/Hacl_Hash_SHA2.h
+++ b/include/internal/Hacl_Hash_SHA2.h
@@ -40,141 +40,121 @@ extern "C" {
 
 static const
 uint32_t
-Hacl_Impl_SHA2_Generic_h224[8U] =
+Hacl_Hash_SHA2_h224[8U] =
   {
-    (uint32_t)0xc1059ed8U, (uint32_t)0x367cd507U, (uint32_t)0x3070dd17U, (uint32_t)0xf70e5939U,
-    (uint32_t)0xffc00b31U, (uint32_t)0x68581511U, (uint32_t)0x64f98fa7U, (uint32_t)0xbefa4fa4U
+    0xc1059ed8U, 0x367cd507U, 0x3070dd17U, 0xf70e5939U, 0xffc00b31U, 0x68581511U, 0x64f98fa7U,
+    0xbefa4fa4U
   };
 
 static const
 uint32_t
-Hacl_Impl_SHA2_Generic_h256[8U] =
+Hacl_Hash_SHA2_h256[8U] =
   {
-    (uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, (uint32_t)0xa54ff53aU,
-    (uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U
+    0x6a09e667U, 0xbb67ae85U, 0x3c6ef372U, 0xa54ff53aU, 0x510e527fU, 0x9b05688cU, 0x1f83d9abU,
+    0x5be0cd19U
   };
 
 static const
 uint64_t
-Hacl_Impl_SHA2_Generic_h384[8U] =
+Hacl_Hash_SHA2_h384[8U] =
   {
-    (uint64_t)0xcbbb9d5dc1059ed8U, (uint64_t)0x629a292a367cd507U, (uint64_t)0x9159015a3070dd17U,
-    (uint64_t)0x152fecd8f70e5939U, (uint64_t)0x67332667ffc00b31U, (uint64_t)0x8eb44a8768581511U,
-    (uint64_t)0xdb0c2e0d64f98fa7U, (uint64_t)0x47b5481dbefa4fa4U
+    0xcbbb9d5dc1059ed8ULL, 0x629a292a367cd507ULL, 0x9159015a3070dd17ULL, 0x152fecd8f70e5939ULL,
+    0x67332667ffc00b31ULL, 0x8eb44a8768581511ULL, 0xdb0c2e0d64f98fa7ULL, 0x47b5481dbefa4fa4ULL
   };
 
 static const
 uint64_t
-Hacl_Impl_SHA2_Generic_h512[8U] =
+Hacl_Hash_SHA2_h512[8U] =
   {
-    (uint64_t)0x6a09e667f3bcc908U, (uint64_t)0xbb67ae8584caa73bU, (uint64_t)0x3c6ef372fe94f82bU,
-    (uint64_t)0xa54ff53a5f1d36f1U, (uint64_t)0x510e527fade682d1U, (uint64_t)0x9b05688c2b3e6c1fU,
-    (uint64_t)0x1f83d9abfb41bd6bU, (uint64_t)0x5be0cd19137e2179U
+    0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL, 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
+    0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL, 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL
   };
 
 static const
 uint32_t
-Hacl_Impl_SHA2_Generic_k224_256[64U] =
+Hacl_Hash_SHA2_k224_256[64U] =
   {
-    (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U,
-    (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U,
-    (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U,
-    (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U,
-    (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU,
-    (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU,
-    (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U,
-    (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U,
-    (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U,
-    (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U,
-    (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U,
-    (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U,
-    (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U,
-    (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U,
-    (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U,
-    (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U
+    0x428a2f98U, 0x71374491U, 0xb5c0fbcfU, 0xe9b5dba5U, 0x3956c25bU, 0x59f111f1U, 0x923f82a4U,
+    0xab1c5ed5U, 0xd807aa98U, 0x12835b01U, 0x243185beU, 0x550c7dc3U, 0x72be5d74U, 0x80deb1feU,
+    0x9bdc06a7U, 0xc19bf174U, 0xe49b69c1U, 0xefbe4786U, 0x0fc19dc6U, 0x240ca1ccU, 0x2de92c6fU,
+    0x4a7484aaU, 0x5cb0a9dcU, 0x76f988daU, 0x983e5152U, 0xa831c66dU, 0xb00327c8U, 0xbf597fc7U,
+    0xc6e00bf3U, 0xd5a79147U, 0x06ca6351U, 0x14292967U, 0x27b70a85U, 0x2e1b2138U, 0x4d2c6dfcU,
+    0x53380d13U, 0x650a7354U, 0x766a0abbU, 0x81c2c92eU, 0x92722c85U, 0xa2bfe8a1U, 0xa81a664bU,
+    0xc24b8b70U, 0xc76c51a3U, 0xd192e819U, 0xd6990624U, 0xf40e3585U, 0x106aa070U, 0x19a4c116U,
+    0x1e376c08U, 0x2748774cU, 0x34b0bcb5U, 0x391c0cb3U, 0x4ed8aa4aU, 0x5b9cca4fU, 0x682e6ff3U,
+    0x748f82eeU, 0x78a5636fU, 0x84c87814U, 0x8cc70208U, 0x90befffaU, 0xa4506cebU, 0xbef9a3f7U,
+    0xc67178f2U
   };
 
 static const
 uint64_t
-Hacl_Impl_SHA2_Generic_k384_512[80U] =
+Hacl_Hash_SHA2_k384_512[80U] =
   {
-    (uint64_t)0x428a2f98d728ae22U, (uint64_t)0x7137449123ef65cdU, (uint64_t)0xb5c0fbcfec4d3b2fU,
-    (uint64_t)0xe9b5dba58189dbbcU, (uint64_t)0x3956c25bf348b538U, (uint64_t)0x59f111f1b605d019U,
-    (uint64_t)0x923f82a4af194f9bU, (uint64_t)0xab1c5ed5da6d8118U, (uint64_t)0xd807aa98a3030242U,
-    (uint64_t)0x12835b0145706fbeU, (uint64_t)0x243185be4ee4b28cU, (uint64_t)0x550c7dc3d5ffb4e2U,
-    (uint64_t)0x72be5d74f27b896fU, (uint64_t)0x80deb1fe3b1696b1U, (uint64_t)0x9bdc06a725c71235U,
-    (uint64_t)0xc19bf174cf692694U, (uint64_t)0xe49b69c19ef14ad2U, (uint64_t)0xefbe4786384f25e3U,
-    (uint64_t)0x0fc19dc68b8cd5b5U, (uint64_t)0x240ca1cc77ac9c65U, (uint64_t)0x2de92c6f592b0275U,
-    (uint64_t)0x4a7484aa6ea6e483U, (uint64_t)0x5cb0a9dcbd41fbd4U, (uint64_t)0x76f988da831153b5U,
-    (uint64_t)0x983e5152ee66dfabU, (uint64_t)0xa831c66d2db43210U, (uint64_t)0xb00327c898fb213fU,
-    (uint64_t)0xbf597fc7beef0ee4U, (uint64_t)0xc6e00bf33da88fc2U, (uint64_t)0xd5a79147930aa725U,
-    (uint64_t)0x06ca6351e003826fU, (uint64_t)0x142929670a0e6e70U, (uint64_t)0x27b70a8546d22ffcU,
-    (uint64_t)0x2e1b21385c26c926U, (uint64_t)0x4d2c6dfc5ac42aedU, (uint64_t)0x53380d139d95b3dfU,
-    (uint64_t)0x650a73548baf63deU, (uint64_t)0x766a0abb3c77b2a8U, (uint64_t)0x81c2c92e47edaee6U,
-    (uint64_t)0x92722c851482353bU, (uint64_t)0xa2bfe8a14cf10364U, (uint64_t)0xa81a664bbc423001U,
-    (uint64_t)0xc24b8b70d0f89791U, (uint64_t)0xc76c51a30654be30U, (uint64_t)0xd192e819d6ef5218U,
-    (uint64_t)0xd69906245565a910U, (uint64_t)0xf40e35855771202aU, (uint64_t)0x106aa07032bbd1b8U,
-    (uint64_t)0x19a4c116b8d2d0c8U, (uint64_t)0x1e376c085141ab53U, (uint64_t)0x2748774cdf8eeb99U,
-    (uint64_t)0x34b0bcb5e19b48a8U, (uint64_t)0x391c0cb3c5c95a63U, (uint64_t)0x4ed8aa4ae3418acbU,
-    (uint64_t)0x5b9cca4f7763e373U, (uint64_t)0x682e6ff3d6b2b8a3U, (uint64_t)0x748f82ee5defb2fcU,
-    (uint64_t)0x78a5636f43172f60U, (uint64_t)0x84c87814a1f0ab72U, (uint64_t)0x8cc702081a6439ecU,
-    (uint64_t)0x90befffa23631e28U, (uint64_t)0xa4506cebde82bde9U, (uint64_t)0xbef9a3f7b2c67915U,
-    (uint64_t)0xc67178f2e372532bU, (uint64_t)0xca273eceea26619cU, (uint64_t)0xd186b8c721c0c207U,
-    (uint64_t)0xeada7dd6cde0eb1eU, (uint64_t)0xf57d4f7fee6ed178U, (uint64_t)0x06f067aa72176fbaU,
-    (uint64_t)0x0a637dc5a2c898a6U, (uint64_t)0x113f9804bef90daeU, (uint64_t)0x1b710b35131c471bU,
-    (uint64_t)0x28db77f523047d84U, (uint64_t)0x32caab7b40c72493U, (uint64_t)0x3c9ebe0a15c9bebcU,
-    (uint64_t)0x431d67c49c100d4cU, (uint64_t)0x4cc5d4becb3e42b6U, (uint64_t)0x597f299cfc657e2aU,
-    (uint64_t)0x5fcb6fab3ad6faecU, (uint64_t)0x6c44198c4a475817U
+    0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
+    0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
+    0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
+    0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
+    0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
+    0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
+    0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
+    0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
+    0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
+    0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
+    0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
+    0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
+    0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
+    0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
+    0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
+    0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
+    0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
+    0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
+    0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
+    0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
   };
 
-void Hacl_SHA2_Scalar32_sha256_init(uint32_t *hash);
+void Hacl_Hash_SHA2_sha256_init(uint32_t *hash);
 
-void Hacl_SHA2_Scalar32_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st);
+void Hacl_Hash_SHA2_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st);
 
 void
-Hacl_SHA2_Scalar32_sha256_update_last(
-  uint64_t totlen,
-  uint32_t len,
-  uint8_t *b,
-  uint32_t *hash
-);
+Hacl_Hash_SHA2_sha256_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *hash);
 
-void Hacl_SHA2_Scalar32_sha256_finish(uint32_t *st, uint8_t *h);
+void Hacl_Hash_SHA2_sha256_finish(uint32_t *st, uint8_t *h);
 
-void Hacl_SHA2_Scalar32_sha224_init(uint32_t *hash);
+void Hacl_Hash_SHA2_sha224_init(uint32_t *hash);
 
 void
-Hacl_SHA2_Scalar32_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st);
+Hacl_Hash_SHA2_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st);
 
-void Hacl_SHA2_Scalar32_sha224_finish(uint32_t *st, uint8_t *h);
+void Hacl_Hash_SHA2_sha224_finish(uint32_t *st, uint8_t *h);
 
-void Hacl_SHA2_Scalar32_sha512_init(uint64_t *hash);
+void Hacl_Hash_SHA2_sha512_init(uint64_t *hash);
 
-void Hacl_SHA2_Scalar32_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
+void Hacl_Hash_SHA2_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
 
 void
-Hacl_SHA2_Scalar32_sha512_update_last(
+Hacl_Hash_SHA2_sha512_update_last(
   FStar_UInt128_uint128 totlen,
   uint32_t len,
   uint8_t *b,
   uint64_t *hash
 );
 
-void Hacl_SHA2_Scalar32_sha512_finish(uint64_t *st, uint8_t *h);
+void Hacl_Hash_SHA2_sha512_finish(uint64_t *st, uint8_t *h);
 
-void Hacl_SHA2_Scalar32_sha384_init(uint64_t *hash);
+void Hacl_Hash_SHA2_sha384_init(uint64_t *hash);
 
-void Hacl_SHA2_Scalar32_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
+void Hacl_Hash_SHA2_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
 
 void
-Hacl_SHA2_Scalar32_sha384_update_last(
+Hacl_Hash_SHA2_sha384_update_last(
   FStar_UInt128_uint128 totlen,
   uint32_t len,
   uint8_t *b,
   uint64_t *st
 );
 
-void Hacl_SHA2_Scalar32_sha384_finish(uint64_t *st, uint8_t *h);
+void Hacl_Hash_SHA2_sha384_finish(uint64_t *st, uint8_t *h);
 
 #if defined(__cplusplus)
 }
diff --git a/include/internal/Hacl_Hash_SHA3.h b/include/internal/Hacl_Hash_SHA3.h
index 6f53d37c..1c8129fb 100644
--- a/include/internal/Hacl_Hash_SHA3.h
+++ b/include/internal/Hacl_Hash_SHA3.h
@@ -53,9 +53,9 @@ Hacl_Hash_SHA3_update_last_sha3(
   uint32_t input_len
 );
 
-void Hacl_Impl_SHA3_state_permute(uint64_t *s);
+void Hacl_Hash_SHA3_state_permute(uint64_t *s);
 
-void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s);
+void Hacl_Hash_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s);
 
 #if defined(__cplusplus)
 }
diff --git a/include/internal/Hacl_Impl_Blake2_Constants.h b/include/internal/Hacl_Impl_Blake2_Constants.h
index 185317ba..aedc2486 100644
--- a/include/internal/Hacl_Impl_Blake2_Constants.h
+++ b/include/internal/Hacl_Impl_Blake2_Constants.h
@@ -37,52 +37,32 @@ extern "C" {
 
 static const
 uint32_t
-Hacl_Impl_Blake2_Constants_sigmaTable[160U] =
+Hacl_Hash_Blake2s_sigmaTable[160U] =
   {
-    (uint32_t)0U, (uint32_t)1U, (uint32_t)2U, (uint32_t)3U, (uint32_t)4U, (uint32_t)5U,
-    (uint32_t)6U, (uint32_t)7U, (uint32_t)8U, (uint32_t)9U, (uint32_t)10U, (uint32_t)11U,
-    (uint32_t)12U, (uint32_t)13U, (uint32_t)14U, (uint32_t)15U, (uint32_t)14U, (uint32_t)10U,
-    (uint32_t)4U, (uint32_t)8U, (uint32_t)9U, (uint32_t)15U, (uint32_t)13U, (uint32_t)6U,
-    (uint32_t)1U, (uint32_t)12U, (uint32_t)0U, (uint32_t)2U, (uint32_t)11U, (uint32_t)7U,
-    (uint32_t)5U, (uint32_t)3U, (uint32_t)11U, (uint32_t)8U, (uint32_t)12U, (uint32_t)0U,
-    (uint32_t)5U, (uint32_t)2U, (uint32_t)15U, (uint32_t)13U, (uint32_t)10U, (uint32_t)14U,
-    (uint32_t)3U, (uint32_t)6U, (uint32_t)7U, (uint32_t)1U, (uint32_t)9U, (uint32_t)4U,
-    (uint32_t)7U, (uint32_t)9U, (uint32_t)3U, (uint32_t)1U, (uint32_t)13U, (uint32_t)12U,
-    (uint32_t)11U, (uint32_t)14U, (uint32_t)2U, (uint32_t)6U, (uint32_t)5U, (uint32_t)10U,
-    (uint32_t)4U, (uint32_t)0U, (uint32_t)15U, (uint32_t)8U, (uint32_t)9U, (uint32_t)0U,
-    (uint32_t)5U, (uint32_t)7U, (uint32_t)2U, (uint32_t)4U, (uint32_t)10U, (uint32_t)15U,
-    (uint32_t)14U, (uint32_t)1U, (uint32_t)11U, (uint32_t)12U, (uint32_t)6U, (uint32_t)8U,
-    (uint32_t)3U, (uint32_t)13U, (uint32_t)2U, (uint32_t)12U, (uint32_t)6U, (uint32_t)10U,
-    (uint32_t)0U, (uint32_t)11U, (uint32_t)8U, (uint32_t)3U, (uint32_t)4U, (uint32_t)13U,
-    (uint32_t)7U, (uint32_t)5U, (uint32_t)15U, (uint32_t)14U, (uint32_t)1U, (uint32_t)9U,
-    (uint32_t)12U, (uint32_t)5U, (uint32_t)1U, (uint32_t)15U, (uint32_t)14U, (uint32_t)13U,
-    (uint32_t)4U, (uint32_t)10U, (uint32_t)0U, (uint32_t)7U, (uint32_t)6U, (uint32_t)3U,
-    (uint32_t)9U, (uint32_t)2U, (uint32_t)8U, (uint32_t)11U, (uint32_t)13U, (uint32_t)11U,
-    (uint32_t)7U, (uint32_t)14U, (uint32_t)12U, (uint32_t)1U, (uint32_t)3U, (uint32_t)9U,
-    (uint32_t)5U, (uint32_t)0U, (uint32_t)15U, (uint32_t)4U, (uint32_t)8U, (uint32_t)6U,
-    (uint32_t)2U, (uint32_t)10U, (uint32_t)6U, (uint32_t)15U, (uint32_t)14U, (uint32_t)9U,
-    (uint32_t)11U, (uint32_t)3U, (uint32_t)0U, (uint32_t)8U, (uint32_t)12U, (uint32_t)2U,
-    (uint32_t)13U, (uint32_t)7U, (uint32_t)1U, (uint32_t)4U, (uint32_t)10U, (uint32_t)5U,
-    (uint32_t)10U, (uint32_t)2U, (uint32_t)8U, (uint32_t)4U, (uint32_t)7U, (uint32_t)6U,
-    (uint32_t)1U, (uint32_t)5U, (uint32_t)15U, (uint32_t)11U, (uint32_t)9U, (uint32_t)14U,
-    (uint32_t)3U, (uint32_t)12U, (uint32_t)13U
+    0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U, 8U, 9U, 10U, 11U, 12U, 13U, 14U, 15U, 14U, 10U, 4U, 8U, 9U, 15U,
+    13U, 6U, 1U, 12U, 0U, 2U, 11U, 7U, 5U, 3U, 11U, 8U, 12U, 0U, 5U, 2U, 15U, 13U, 10U, 14U, 3U, 6U,
+    7U, 1U, 9U, 4U, 7U, 9U, 3U, 1U, 13U, 12U, 11U, 14U, 2U, 6U, 5U, 10U, 4U, 0U, 15U, 8U, 9U, 0U,
+    5U, 7U, 2U, 4U, 10U, 15U, 14U, 1U, 11U, 12U, 6U, 8U, 3U, 13U, 2U, 12U, 6U, 10U, 0U, 11U, 8U, 3U,
+    4U, 13U, 7U, 5U, 15U, 14U, 1U, 9U, 12U, 5U, 1U, 15U, 14U, 13U, 4U, 10U, 0U, 7U, 6U, 3U, 9U, 2U,
+    8U, 11U, 13U, 11U, 7U, 14U, 12U, 1U, 3U, 9U, 5U, 0U, 15U, 4U, 8U, 6U, 2U, 10U, 6U, 15U, 14U, 9U,
+    11U, 3U, 0U, 8U, 12U, 2U, 13U, 7U, 1U, 4U, 10U, 5U, 10U, 2U, 8U, 4U, 7U, 6U, 1U, 5U, 15U, 11U,
+    9U, 14U, 3U, 12U, 13U
   };
 
 static const
 uint32_t
-Hacl_Impl_Blake2_Constants_ivTable_S[8U] =
+Hacl_Hash_Blake2s_ivTable_S[8U] =
   {
-    (uint32_t)0x6A09E667U, (uint32_t)0xBB67AE85U, (uint32_t)0x3C6EF372U, (uint32_t)0xA54FF53AU,
-    (uint32_t)0x510E527FU, (uint32_t)0x9B05688CU, (uint32_t)0x1F83D9ABU, (uint32_t)0x5BE0CD19U
+    0x6A09E667U, 0xBB67AE85U, 0x3C6EF372U, 0xA54FF53AU, 0x510E527FU, 0x9B05688CU, 0x1F83D9ABU,
+    0x5BE0CD19U
   };
 
 static const
 uint64_t
-Hacl_Impl_Blake2_Constants_ivTable_B[8U] =
+Hacl_Hash_Blake2s_ivTable_B[8U] =
   {
-    (uint64_t)0x6A09E667F3BCC908U, (uint64_t)0xBB67AE8584CAA73BU, (uint64_t)0x3C6EF372FE94F82BU,
-    (uint64_t)0xA54FF53A5F1D36F1U, (uint64_t)0x510E527FADE682D1U, (uint64_t)0x9B05688C2B3E6C1FU,
-    (uint64_t)0x1F83D9ABFB41BD6BU, (uint64_t)0x5BE0CD19137E2179U
+    0x6A09E667F3BCC908ULL, 0xBB67AE8584CAA73BULL, 0x3C6EF372FE94F82BULL, 0xA54FF53A5F1D36F1ULL,
+    0x510E527FADE682D1ULL, 0x9B05688C2B3E6C1FULL, 0x1F83D9ABFB41BD6BULL, 0x5BE0CD19137E2179ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/internal/Hacl_Impl_FFDHE_Constants.h b/include/internal/Hacl_Impl_FFDHE_Constants.h
index c746c411..80cbdd52 100644
--- a/include/internal/Hacl_Impl_FFDHE_Constants.h
+++ b/include/internal/Hacl_Impl_FFDHE_Constants.h
@@ -35,528 +35,265 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-static const uint8_t Hacl_Impl_FFDHE_Constants_ffdhe_g2[1U] = { (uint8_t)0x02U };
+static const uint8_t Hacl_Impl_FFDHE_Constants_ffdhe_g2[1U] = { 0x02U };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p2048[256U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x28U,
-    (uint8_t)0x5CU, (uint8_t)0x97U, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x28U, 0x5CU,
+    0x97U, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p3072[384U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0xC6U, (uint8_t)0x2EU, (uint8_t)0x37U, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0xC6U, 0x2EU, 0x37U, 0xFFU,
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p4096[512U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU,
-    (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU,
-    (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U,
-    (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U,
-    (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U,
-    (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U,
-    (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U,
-    (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U,
-    (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU,
-    (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U,
-    (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U,
-    (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U,
-    (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U,
-    (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU,
-    (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U,
-    (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U,
-    (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U,
-    (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU,
-    (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U,
-    (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U,
-    (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU,
-    (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x65U, (uint8_t)0x5FU, (uint8_t)0x6AU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0x9EU, 0x1EU, 0xF1U, 0x6EU,
+    0x6FU, 0x52U, 0xC3U, 0x16U, 0x4DU, 0xF4U, 0xFBU, 0x79U, 0x30U, 0xE9U, 0xE4U, 0xE5U, 0x88U,
+    0x57U, 0xB6U, 0xACU, 0x7DU, 0x5FU, 0x42U, 0xD6U, 0x9FU, 0x6DU, 0x18U, 0x77U, 0x63U, 0xCFU,
+    0x1DU, 0x55U, 0x03U, 0x40U, 0x04U, 0x87U, 0xF5U, 0x5BU, 0xA5U, 0x7EU, 0x31U, 0xCCU, 0x7AU,
+    0x71U, 0x35U, 0xC8U, 0x86U, 0xEFU, 0xB4U, 0x31U, 0x8AU, 0xEDU, 0x6AU, 0x1EU, 0x01U, 0x2DU,
+    0x9EU, 0x68U, 0x32U, 0xA9U, 0x07U, 0x60U, 0x0AU, 0x91U, 0x81U, 0x30U, 0xC4U, 0x6DU, 0xC7U,
+    0x78U, 0xF9U, 0x71U, 0xADU, 0x00U, 0x38U, 0x09U, 0x29U, 0x99U, 0xA3U, 0x33U, 0xCBU, 0x8BU,
+    0x7AU, 0x1AU, 0x1DU, 0xB9U, 0x3DU, 0x71U, 0x40U, 0x00U, 0x3CU, 0x2AU, 0x4EU, 0xCEU, 0xA9U,
+    0xF9U, 0x8DU, 0x0AU, 0xCCU, 0x0AU, 0x82U, 0x91U, 0xCDU, 0xCEU, 0xC9U, 0x7DU, 0xCFU, 0x8EU,
+    0xC9U, 0xB5U, 0x5AU, 0x7FU, 0x88U, 0xA4U, 0x6BU, 0x4DU, 0xB5U, 0xA8U, 0x51U, 0xF4U, 0x41U,
+    0x82U, 0xE1U, 0xC6U, 0x8AU, 0x00U, 0x7EU, 0x5EU, 0x65U, 0x5FU, 0x6AU, 0xFFU, 0xFFU, 0xFFU,
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p6144[768U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU,
-    (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU,
-    (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U,
-    (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U,
-    (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U,
-    (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U,
-    (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U,
-    (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U,
-    (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU,
-    (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U,
-    (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U,
-    (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U,
-    (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U,
-    (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU,
-    (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U,
-    (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U,
-    (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U,
-    (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU,
-    (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U,
-    (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U,
-    (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU,
-    (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x0DU, (uint8_t)0xD9U, (uint8_t)0x02U,
-    (uint8_t)0x0BU, (uint8_t)0xFDU, (uint8_t)0x64U, (uint8_t)0xB6U, (uint8_t)0x45U, (uint8_t)0x03U,
-    (uint8_t)0x6CU, (uint8_t)0x7AU, (uint8_t)0x4EU, (uint8_t)0x67U, (uint8_t)0x7DU, (uint8_t)0x2CU,
-    (uint8_t)0x38U, (uint8_t)0x53U, (uint8_t)0x2AU, (uint8_t)0x3AU, (uint8_t)0x23U, (uint8_t)0xBAU,
-    (uint8_t)0x44U, (uint8_t)0x42U, (uint8_t)0xCAU, (uint8_t)0xF5U, (uint8_t)0x3EU, (uint8_t)0xA6U,
-    (uint8_t)0x3BU, (uint8_t)0xB4U, (uint8_t)0x54U, (uint8_t)0x32U, (uint8_t)0x9BU, (uint8_t)0x76U,
-    (uint8_t)0x24U, (uint8_t)0xC8U, (uint8_t)0x91U, (uint8_t)0x7BU, (uint8_t)0xDDU, (uint8_t)0x64U,
-    (uint8_t)0xB1U, (uint8_t)0xC0U, (uint8_t)0xFDU, (uint8_t)0x4CU, (uint8_t)0xB3U, (uint8_t)0x8EU,
-    (uint8_t)0x8CU, (uint8_t)0x33U, (uint8_t)0x4CU, (uint8_t)0x70U, (uint8_t)0x1CU, (uint8_t)0x3AU,
-    (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x06U, (uint8_t)0x57U, (uint8_t)0xFCU, (uint8_t)0xCFU,
-    (uint8_t)0xECU, (uint8_t)0x71U, (uint8_t)0x9BU, (uint8_t)0x1FU, (uint8_t)0x5CU, (uint8_t)0x3EU,
-    (uint8_t)0x4EU, (uint8_t)0x46U, (uint8_t)0x04U, (uint8_t)0x1FU, (uint8_t)0x38U, (uint8_t)0x81U,
-    (uint8_t)0x47U, (uint8_t)0xFBU, (uint8_t)0x4CU, (uint8_t)0xFDU, (uint8_t)0xB4U, (uint8_t)0x77U,
-    (uint8_t)0xA5U, (uint8_t)0x24U, (uint8_t)0x71U, (uint8_t)0xF7U, (uint8_t)0xA9U, (uint8_t)0xA9U,
-    (uint8_t)0x69U, (uint8_t)0x10U, (uint8_t)0xB8U, (uint8_t)0x55U, (uint8_t)0x32U, (uint8_t)0x2EU,
-    (uint8_t)0xDBU, (uint8_t)0x63U, (uint8_t)0x40U, (uint8_t)0xD8U, (uint8_t)0xA0U, (uint8_t)0x0EU,
-    (uint8_t)0xF0U, (uint8_t)0x92U, (uint8_t)0x35U, (uint8_t)0x05U, (uint8_t)0x11U, (uint8_t)0xE3U,
-    (uint8_t)0x0AU, (uint8_t)0xBEU, (uint8_t)0xC1U, (uint8_t)0xFFU, (uint8_t)0xF9U, (uint8_t)0xE3U,
-    (uint8_t)0xA2U, (uint8_t)0x6EU, (uint8_t)0x7FU, (uint8_t)0xB2U, (uint8_t)0x9FU, (uint8_t)0x8CU,
-    (uint8_t)0x18U, (uint8_t)0x30U, (uint8_t)0x23U, (uint8_t)0xC3U, (uint8_t)0x58U, (uint8_t)0x7EU,
-    (uint8_t)0x38U, (uint8_t)0xDAU, (uint8_t)0x00U, (uint8_t)0x77U, (uint8_t)0xD9U, (uint8_t)0xB4U,
-    (uint8_t)0x76U, (uint8_t)0x3EU, (uint8_t)0x4EU, (uint8_t)0x4BU, (uint8_t)0x94U, (uint8_t)0xB2U,
-    (uint8_t)0xBBU, (uint8_t)0xC1U, (uint8_t)0x94U, (uint8_t)0xC6U, (uint8_t)0x65U, (uint8_t)0x1EU,
-    (uint8_t)0x77U, (uint8_t)0xCAU, (uint8_t)0xF9U, (uint8_t)0x92U, (uint8_t)0xEEU, (uint8_t)0xAAU,
-    (uint8_t)0xC0U, (uint8_t)0x23U, (uint8_t)0x2AU, (uint8_t)0x28U, (uint8_t)0x1BU, (uint8_t)0xF6U,
-    (uint8_t)0xB3U, (uint8_t)0xA7U, (uint8_t)0x39U, (uint8_t)0xC1U, (uint8_t)0x22U, (uint8_t)0x61U,
-    (uint8_t)0x16U, (uint8_t)0x82U, (uint8_t)0x0AU, (uint8_t)0xE8U, (uint8_t)0xDBU, (uint8_t)0x58U,
-    (uint8_t)0x47U, (uint8_t)0xA6U, (uint8_t)0x7CU, (uint8_t)0xBEU, (uint8_t)0xF9U, (uint8_t)0xC9U,
-    (uint8_t)0x09U, (uint8_t)0x1BU, (uint8_t)0x46U, (uint8_t)0x2DU, (uint8_t)0x53U, (uint8_t)0x8CU,
-    (uint8_t)0xD7U, (uint8_t)0x2BU, (uint8_t)0x03U, (uint8_t)0x74U, (uint8_t)0x6AU, (uint8_t)0xE7U,
-    (uint8_t)0x7FU, (uint8_t)0x5EU, (uint8_t)0x62U, (uint8_t)0x29U, (uint8_t)0x2CU, (uint8_t)0x31U,
-    (uint8_t)0x15U, (uint8_t)0x62U, (uint8_t)0xA8U, (uint8_t)0x46U, (uint8_t)0x50U, (uint8_t)0x5DU,
-    (uint8_t)0xC8U, (uint8_t)0x2DU, (uint8_t)0xB8U, (uint8_t)0x54U, (uint8_t)0x33U, (uint8_t)0x8AU,
-    (uint8_t)0xE4U, (uint8_t)0x9FU, (uint8_t)0x52U, (uint8_t)0x35U, (uint8_t)0xC9U, (uint8_t)0x5BU,
-    (uint8_t)0x91U, (uint8_t)0x17U, (uint8_t)0x8CU, (uint8_t)0xCFU, (uint8_t)0x2DU, (uint8_t)0xD5U,
-    (uint8_t)0xCAU, (uint8_t)0xCEU, (uint8_t)0xF4U, (uint8_t)0x03U, (uint8_t)0xECU, (uint8_t)0x9DU,
-    (uint8_t)0x18U, (uint8_t)0x10U, (uint8_t)0xC6U, (uint8_t)0x27U, (uint8_t)0x2BU, (uint8_t)0x04U,
-    (uint8_t)0x5BU, (uint8_t)0x3BU, (uint8_t)0x71U, (uint8_t)0xF9U, (uint8_t)0xDCU, (uint8_t)0x6BU,
-    (uint8_t)0x80U, (uint8_t)0xD6U, (uint8_t)0x3FU, (uint8_t)0xDDU, (uint8_t)0x4AU, (uint8_t)0x8EU,
-    (uint8_t)0x9AU, (uint8_t)0xDBU, (uint8_t)0x1EU, (uint8_t)0x69U, (uint8_t)0x62U, (uint8_t)0xA6U,
-    (uint8_t)0x95U, (uint8_t)0x26U, (uint8_t)0xD4U, (uint8_t)0x31U, (uint8_t)0x61U, (uint8_t)0xC1U,
-    (uint8_t)0xA4U, (uint8_t)0x1DU, (uint8_t)0x57U, (uint8_t)0x0DU, (uint8_t)0x79U, (uint8_t)0x38U,
-    (uint8_t)0xDAU, (uint8_t)0xD4U, (uint8_t)0xA4U, (uint8_t)0x0EU, (uint8_t)0x32U, (uint8_t)0x9CU,
-    (uint8_t)0xD0U, (uint8_t)0xE4U, (uint8_t)0x0EU, (uint8_t)0x65U, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0x9EU, 0x1EU, 0xF1U, 0x6EU,
+    0x6FU, 0x52U, 0xC3U, 0x16U, 0x4DU, 0xF4U, 0xFBU, 0x79U, 0x30U, 0xE9U, 0xE4U, 0xE5U, 0x88U,
+    0x57U, 0xB6U, 0xACU, 0x7DU, 0x5FU, 0x42U, 0xD6U, 0x9FU, 0x6DU, 0x18U, 0x77U, 0x63U, 0xCFU,
+    0x1DU, 0x55U, 0x03U, 0x40U, 0x04U, 0x87U, 0xF5U, 0x5BU, 0xA5U, 0x7EU, 0x31U, 0xCCU, 0x7AU,
+    0x71U, 0x35U, 0xC8U, 0x86U, 0xEFU, 0xB4U, 0x31U, 0x8AU, 0xEDU, 0x6AU, 0x1EU, 0x01U, 0x2DU,
+    0x9EU, 0x68U, 0x32U, 0xA9U, 0x07U, 0x60U, 0x0AU, 0x91U, 0x81U, 0x30U, 0xC4U, 0x6DU, 0xC7U,
+    0x78U, 0xF9U, 0x71U, 0xADU, 0x00U, 0x38U, 0x09U, 0x29U, 0x99U, 0xA3U, 0x33U, 0xCBU, 0x8BU,
+    0x7AU, 0x1AU, 0x1DU, 0xB9U, 0x3DU, 0x71U, 0x40U, 0x00U, 0x3CU, 0x2AU, 0x4EU, 0xCEU, 0xA9U,
+    0xF9U, 0x8DU, 0x0AU, 0xCCU, 0x0AU, 0x82U, 0x91U, 0xCDU, 0xCEU, 0xC9U, 0x7DU, 0xCFU, 0x8EU,
+    0xC9U, 0xB5U, 0x5AU, 0x7FU, 0x88U, 0xA4U, 0x6BU, 0x4DU, 0xB5U, 0xA8U, 0x51U, 0xF4U, 0x41U,
+    0x82U, 0xE1U, 0xC6U, 0x8AU, 0x00U, 0x7EU, 0x5EU, 0x0DU, 0xD9U, 0x02U, 0x0BU, 0xFDU, 0x64U,
+    0xB6U, 0x45U, 0x03U, 0x6CU, 0x7AU, 0x4EU, 0x67U, 0x7DU, 0x2CU, 0x38U, 0x53U, 0x2AU, 0x3AU,
+    0x23U, 0xBAU, 0x44U, 0x42U, 0xCAU, 0xF5U, 0x3EU, 0xA6U, 0x3BU, 0xB4U, 0x54U, 0x32U, 0x9BU,
+    0x76U, 0x24U, 0xC8U, 0x91U, 0x7BU, 0xDDU, 0x64U, 0xB1U, 0xC0U, 0xFDU, 0x4CU, 0xB3U, 0x8EU,
+    0x8CU, 0x33U, 0x4CU, 0x70U, 0x1CU, 0x3AU, 0xCDU, 0xADU, 0x06U, 0x57U, 0xFCU, 0xCFU, 0xECU,
+    0x71U, 0x9BU, 0x1FU, 0x5CU, 0x3EU, 0x4EU, 0x46U, 0x04U, 0x1FU, 0x38U, 0x81U, 0x47U, 0xFBU,
+    0x4CU, 0xFDU, 0xB4U, 0x77U, 0xA5U, 0x24U, 0x71U, 0xF7U, 0xA9U, 0xA9U, 0x69U, 0x10U, 0xB8U,
+    0x55U, 0x32U, 0x2EU, 0xDBU, 0x63U, 0x40U, 0xD8U, 0xA0U, 0x0EU, 0xF0U, 0x92U, 0x35U, 0x05U,
+    0x11U, 0xE3U, 0x0AU, 0xBEU, 0xC1U, 0xFFU, 0xF9U, 0xE3U, 0xA2U, 0x6EU, 0x7FU, 0xB2U, 0x9FU,
+    0x8CU, 0x18U, 0x30U, 0x23U, 0xC3U, 0x58U, 0x7EU, 0x38U, 0xDAU, 0x00U, 0x77U, 0xD9U, 0xB4U,
+    0x76U, 0x3EU, 0x4EU, 0x4BU, 0x94U, 0xB2U, 0xBBU, 0xC1U, 0x94U, 0xC6U, 0x65U, 0x1EU, 0x77U,
+    0xCAU, 0xF9U, 0x92U, 0xEEU, 0xAAU, 0xC0U, 0x23U, 0x2AU, 0x28U, 0x1BU, 0xF6U, 0xB3U, 0xA7U,
+    0x39U, 0xC1U, 0x22U, 0x61U, 0x16U, 0x82U, 0x0AU, 0xE8U, 0xDBU, 0x58U, 0x47U, 0xA6U, 0x7CU,
+    0xBEU, 0xF9U, 0xC9U, 0x09U, 0x1BU, 0x46U, 0x2DU, 0x53U, 0x8CU, 0xD7U, 0x2BU, 0x03U, 0x74U,
+    0x6AU, 0xE7U, 0x7FU, 0x5EU, 0x62U, 0x29U, 0x2CU, 0x31U, 0x15U, 0x62U, 0xA8U, 0x46U, 0x50U,
+    0x5DU, 0xC8U, 0x2DU, 0xB8U, 0x54U, 0x33U, 0x8AU, 0xE4U, 0x9FU, 0x52U, 0x35U, 0xC9U, 0x5BU,
+    0x91U, 0x17U, 0x8CU, 0xCFU, 0x2DU, 0xD5U, 0xCAU, 0xCEU, 0xF4U, 0x03U, 0xECU, 0x9DU, 0x18U,
+    0x10U, 0xC6U, 0x27U, 0x2BU, 0x04U, 0x5BU, 0x3BU, 0x71U, 0xF9U, 0xDCU, 0x6BU, 0x80U, 0xD6U,
+    0x3FU, 0xDDU, 0x4AU, 0x8EU, 0x9AU, 0xDBU, 0x1EU, 0x69U, 0x62U, 0xA6U, 0x95U, 0x26U, 0xD4U,
+    0x31U, 0x61U, 0xC1U, 0xA4U, 0x1DU, 0x57U, 0x0DU, 0x79U, 0x38U, 0xDAU, 0xD4U, 0xA4U, 0x0EU,
+    0x32U, 0x9CU, 0xD0U, 0xE4U, 0x0EU, 0x65U, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU,
+    0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p8192[1024U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU,
-    (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU,
-    (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U,
-    (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U,
-    (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U,
-    (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U,
-    (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U,
-    (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U,
-    (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU,
-    (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U,
-    (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U,
-    (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U,
-    (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U,
-    (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU,
-    (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U,
-    (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U,
-    (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U,
-    (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU,
-    (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U,
-    (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U,
-    (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU,
-    (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x0DU, (uint8_t)0xD9U, (uint8_t)0x02U,
-    (uint8_t)0x0BU, (uint8_t)0xFDU, (uint8_t)0x64U, (uint8_t)0xB6U, (uint8_t)0x45U, (uint8_t)0x03U,
-    (uint8_t)0x6CU, (uint8_t)0x7AU, (uint8_t)0x4EU, (uint8_t)0x67U, (uint8_t)0x7DU, (uint8_t)0x2CU,
-    (uint8_t)0x38U, (uint8_t)0x53U, (uint8_t)0x2AU, (uint8_t)0x3AU, (uint8_t)0x23U, (uint8_t)0xBAU,
-    (uint8_t)0x44U, (uint8_t)0x42U, (uint8_t)0xCAU, (uint8_t)0xF5U, (uint8_t)0x3EU, (uint8_t)0xA6U,
-    (uint8_t)0x3BU, (uint8_t)0xB4U, (uint8_t)0x54U, (uint8_t)0x32U, (uint8_t)0x9BU, (uint8_t)0x76U,
-    (uint8_t)0x24U, (uint8_t)0xC8U, (uint8_t)0x91U, (uint8_t)0x7BU, (uint8_t)0xDDU, (uint8_t)0x64U,
-    (uint8_t)0xB1U, (uint8_t)0xC0U, (uint8_t)0xFDU, (uint8_t)0x4CU, (uint8_t)0xB3U, (uint8_t)0x8EU,
-    (uint8_t)0x8CU, (uint8_t)0x33U, (uint8_t)0x4CU, (uint8_t)0x70U, (uint8_t)0x1CU, (uint8_t)0x3AU,
-    (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x06U, (uint8_t)0x57U, (uint8_t)0xFCU, (uint8_t)0xCFU,
-    (uint8_t)0xECU, (uint8_t)0x71U, (uint8_t)0x9BU, (uint8_t)0x1FU, (uint8_t)0x5CU, (uint8_t)0x3EU,
-    (uint8_t)0x4EU, (uint8_t)0x46U, (uint8_t)0x04U, (uint8_t)0x1FU, (uint8_t)0x38U, (uint8_t)0x81U,
-    (uint8_t)0x47U, (uint8_t)0xFBU, (uint8_t)0x4CU, (uint8_t)0xFDU, (uint8_t)0xB4U, (uint8_t)0x77U,
-    (uint8_t)0xA5U, (uint8_t)0x24U, (uint8_t)0x71U, (uint8_t)0xF7U, (uint8_t)0xA9U, (uint8_t)0xA9U,
-    (uint8_t)0x69U, (uint8_t)0x10U, (uint8_t)0xB8U, (uint8_t)0x55U, (uint8_t)0x32U, (uint8_t)0x2EU,
-    (uint8_t)0xDBU, (uint8_t)0x63U, (uint8_t)0x40U, (uint8_t)0xD8U, (uint8_t)0xA0U, (uint8_t)0x0EU,
-    (uint8_t)0xF0U, (uint8_t)0x92U, (uint8_t)0x35U, (uint8_t)0x05U, (uint8_t)0x11U, (uint8_t)0xE3U,
-    (uint8_t)0x0AU, (uint8_t)0xBEU, (uint8_t)0xC1U, (uint8_t)0xFFU, (uint8_t)0xF9U, (uint8_t)0xE3U,
-    (uint8_t)0xA2U, (uint8_t)0x6EU, (uint8_t)0x7FU, (uint8_t)0xB2U, (uint8_t)0x9FU, (uint8_t)0x8CU,
-    (uint8_t)0x18U, (uint8_t)0x30U, (uint8_t)0x23U, (uint8_t)0xC3U, (uint8_t)0x58U, (uint8_t)0x7EU,
-    (uint8_t)0x38U, (uint8_t)0xDAU, (uint8_t)0x00U, (uint8_t)0x77U, (uint8_t)0xD9U, (uint8_t)0xB4U,
-    (uint8_t)0x76U, (uint8_t)0x3EU, (uint8_t)0x4EU, (uint8_t)0x4BU, (uint8_t)0x94U, (uint8_t)0xB2U,
-    (uint8_t)0xBBU, (uint8_t)0xC1U, (uint8_t)0x94U, (uint8_t)0xC6U, (uint8_t)0x65U, (uint8_t)0x1EU,
-    (uint8_t)0x77U, (uint8_t)0xCAU, (uint8_t)0xF9U, (uint8_t)0x92U, (uint8_t)0xEEU, (uint8_t)0xAAU,
-    (uint8_t)0xC0U, (uint8_t)0x23U, (uint8_t)0x2AU, (uint8_t)0x28U, (uint8_t)0x1BU, (uint8_t)0xF6U,
-    (uint8_t)0xB3U, (uint8_t)0xA7U, (uint8_t)0x39U, (uint8_t)0xC1U, (uint8_t)0x22U, (uint8_t)0x61U,
-    (uint8_t)0x16U, (uint8_t)0x82U, (uint8_t)0x0AU, (uint8_t)0xE8U, (uint8_t)0xDBU, (uint8_t)0x58U,
-    (uint8_t)0x47U, (uint8_t)0xA6U, (uint8_t)0x7CU, (uint8_t)0xBEU, (uint8_t)0xF9U, (uint8_t)0xC9U,
-    (uint8_t)0x09U, (uint8_t)0x1BU, (uint8_t)0x46U, (uint8_t)0x2DU, (uint8_t)0x53U, (uint8_t)0x8CU,
-    (uint8_t)0xD7U, (uint8_t)0x2BU, (uint8_t)0x03U, (uint8_t)0x74U, (uint8_t)0x6AU, (uint8_t)0xE7U,
-    (uint8_t)0x7FU, (uint8_t)0x5EU, (uint8_t)0x62U, (uint8_t)0x29U, (uint8_t)0x2CU, (uint8_t)0x31U,
-    (uint8_t)0x15U, (uint8_t)0x62U, (uint8_t)0xA8U, (uint8_t)0x46U, (uint8_t)0x50U, (uint8_t)0x5DU,
-    (uint8_t)0xC8U, (uint8_t)0x2DU, (uint8_t)0xB8U, (uint8_t)0x54U, (uint8_t)0x33U, (uint8_t)0x8AU,
-    (uint8_t)0xE4U, (uint8_t)0x9FU, (uint8_t)0x52U, (uint8_t)0x35U, (uint8_t)0xC9U, (uint8_t)0x5BU,
-    (uint8_t)0x91U, (uint8_t)0x17U, (uint8_t)0x8CU, (uint8_t)0xCFU, (uint8_t)0x2DU, (uint8_t)0xD5U,
-    (uint8_t)0xCAU, (uint8_t)0xCEU, (uint8_t)0xF4U, (uint8_t)0x03U, (uint8_t)0xECU, (uint8_t)0x9DU,
-    (uint8_t)0x18U, (uint8_t)0x10U, (uint8_t)0xC6U, (uint8_t)0x27U, (uint8_t)0x2BU, (uint8_t)0x04U,
-    (uint8_t)0x5BU, (uint8_t)0x3BU, (uint8_t)0x71U, (uint8_t)0xF9U, (uint8_t)0xDCU, (uint8_t)0x6BU,
-    (uint8_t)0x80U, (uint8_t)0xD6U, (uint8_t)0x3FU, (uint8_t)0xDDU, (uint8_t)0x4AU, (uint8_t)0x8EU,
-    (uint8_t)0x9AU, (uint8_t)0xDBU, (uint8_t)0x1EU, (uint8_t)0x69U, (uint8_t)0x62U, (uint8_t)0xA6U,
-    (uint8_t)0x95U, (uint8_t)0x26U, (uint8_t)0xD4U, (uint8_t)0x31U, (uint8_t)0x61U, (uint8_t)0xC1U,
-    (uint8_t)0xA4U, (uint8_t)0x1DU, (uint8_t)0x57U, (uint8_t)0x0DU, (uint8_t)0x79U, (uint8_t)0x38U,
-    (uint8_t)0xDAU, (uint8_t)0xD4U, (uint8_t)0xA4U, (uint8_t)0x0EU, (uint8_t)0x32U, (uint8_t)0x9CU,
-    (uint8_t)0xCFU, (uint8_t)0xF4U, (uint8_t)0x6AU, (uint8_t)0xAAU, (uint8_t)0x36U, (uint8_t)0xADU,
-    (uint8_t)0x00U, (uint8_t)0x4CU, (uint8_t)0xF6U, (uint8_t)0x00U, (uint8_t)0xC8U, (uint8_t)0x38U,
-    (uint8_t)0x1EU, (uint8_t)0x42U, (uint8_t)0x5AU, (uint8_t)0x31U, (uint8_t)0xD9U, (uint8_t)0x51U,
-    (uint8_t)0xAEU, (uint8_t)0x64U, (uint8_t)0xFDU, (uint8_t)0xB2U, (uint8_t)0x3FU, (uint8_t)0xCEU,
-    (uint8_t)0xC9U, (uint8_t)0x50U, (uint8_t)0x9DU, (uint8_t)0x43U, (uint8_t)0x68U, (uint8_t)0x7FU,
-    (uint8_t)0xEBU, (uint8_t)0x69U, (uint8_t)0xEDU, (uint8_t)0xD1U, (uint8_t)0xCCU, (uint8_t)0x5EU,
-    (uint8_t)0x0BU, (uint8_t)0x8CU, (uint8_t)0xC3U, (uint8_t)0xBDU, (uint8_t)0xF6U, (uint8_t)0x4BU,
-    (uint8_t)0x10U, (uint8_t)0xEFU, (uint8_t)0x86U, (uint8_t)0xB6U, (uint8_t)0x31U, (uint8_t)0x42U,
-    (uint8_t)0xA3U, (uint8_t)0xABU, (uint8_t)0x88U, (uint8_t)0x29U, (uint8_t)0x55U, (uint8_t)0x5BU,
-    (uint8_t)0x2FU, (uint8_t)0x74U, (uint8_t)0x7CU, (uint8_t)0x93U, (uint8_t)0x26U, (uint8_t)0x65U,
-    (uint8_t)0xCBU, (uint8_t)0x2CU, (uint8_t)0x0FU, (uint8_t)0x1CU, (uint8_t)0xC0U, (uint8_t)0x1BU,
-    (uint8_t)0xD7U, (uint8_t)0x02U, (uint8_t)0x29U, (uint8_t)0x38U, (uint8_t)0x88U, (uint8_t)0x39U,
-    (uint8_t)0xD2U, (uint8_t)0xAFU, (uint8_t)0x05U, (uint8_t)0xE4U, (uint8_t)0x54U, (uint8_t)0x50U,
-    (uint8_t)0x4AU, (uint8_t)0xC7U, (uint8_t)0x8BU, (uint8_t)0x75U, (uint8_t)0x82U, (uint8_t)0x82U,
-    (uint8_t)0x28U, (uint8_t)0x46U, (uint8_t)0xC0U, (uint8_t)0xBAU, (uint8_t)0x35U, (uint8_t)0xC3U,
-    (uint8_t)0x5FU, (uint8_t)0x5CU, (uint8_t)0x59U, (uint8_t)0x16U, (uint8_t)0x0CU, (uint8_t)0xC0U,
-    (uint8_t)0x46U, (uint8_t)0xFDU, (uint8_t)0x82U, (uint8_t)0x51U, (uint8_t)0x54U, (uint8_t)0x1FU,
-    (uint8_t)0xC6U, (uint8_t)0x8CU, (uint8_t)0x9CU, (uint8_t)0x86U, (uint8_t)0xB0U, (uint8_t)0x22U,
-    (uint8_t)0xBBU, (uint8_t)0x70U, (uint8_t)0x99U, (uint8_t)0x87U, (uint8_t)0x6AU, (uint8_t)0x46U,
-    (uint8_t)0x0EU, (uint8_t)0x74U, (uint8_t)0x51U, (uint8_t)0xA8U, (uint8_t)0xA9U, (uint8_t)0x31U,
-    (uint8_t)0x09U, (uint8_t)0x70U, (uint8_t)0x3FU, (uint8_t)0xEEU, (uint8_t)0x1CU, (uint8_t)0x21U,
-    (uint8_t)0x7EU, (uint8_t)0x6CU, (uint8_t)0x38U, (uint8_t)0x26U, (uint8_t)0xE5U, (uint8_t)0x2CU,
-    (uint8_t)0x51U, (uint8_t)0xAAU, (uint8_t)0x69U, (uint8_t)0x1EU, (uint8_t)0x0EU, (uint8_t)0x42U,
-    (uint8_t)0x3CU, (uint8_t)0xFCU, (uint8_t)0x99U, (uint8_t)0xE9U, (uint8_t)0xE3U, (uint8_t)0x16U,
-    (uint8_t)0x50U, (uint8_t)0xC1U, (uint8_t)0x21U, (uint8_t)0x7BU, (uint8_t)0x62U, (uint8_t)0x48U,
-    (uint8_t)0x16U, (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x9AU, (uint8_t)0x95U, (uint8_t)0xF9U,
-    (uint8_t)0xD5U, (uint8_t)0xB8U, (uint8_t)0x01U, (uint8_t)0x94U, (uint8_t)0x88U, (uint8_t)0xD9U,
-    (uint8_t)0xC0U, (uint8_t)0xA0U, (uint8_t)0xA1U, (uint8_t)0xFEU, (uint8_t)0x30U, (uint8_t)0x75U,
-    (uint8_t)0xA5U, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0x31U, (uint8_t)0x83U, (uint8_t)0xF8U,
-    (uint8_t)0x1DU, (uint8_t)0x4AU, (uint8_t)0x3FU, (uint8_t)0x2FU, (uint8_t)0xA4U, (uint8_t)0x57U,
-    (uint8_t)0x1EU, (uint8_t)0xFCU, (uint8_t)0x8CU, (uint8_t)0xE0U, (uint8_t)0xBAU, (uint8_t)0x8AU,
-    (uint8_t)0x4FU, (uint8_t)0xE8U, (uint8_t)0xB6U, (uint8_t)0x85U, (uint8_t)0x5DU, (uint8_t)0xFEU,
-    (uint8_t)0x72U, (uint8_t)0xB0U, (uint8_t)0xA6U, (uint8_t)0x6EU, (uint8_t)0xDEU, (uint8_t)0xD2U,
-    (uint8_t)0xFBU, (uint8_t)0xABU, (uint8_t)0xFBU, (uint8_t)0xE5U, (uint8_t)0x8AU, (uint8_t)0x30U,
-    (uint8_t)0xFAU, (uint8_t)0xFAU, (uint8_t)0xBEU, (uint8_t)0x1CU, (uint8_t)0x5DU, (uint8_t)0x71U,
-    (uint8_t)0xA8U, (uint8_t)0x7EU, (uint8_t)0x2FU, (uint8_t)0x74U, (uint8_t)0x1EU, (uint8_t)0xF8U,
-    (uint8_t)0xC1U, (uint8_t)0xFEU, (uint8_t)0x86U, (uint8_t)0xFEU, (uint8_t)0xA6U, (uint8_t)0xBBU,
-    (uint8_t)0xFDU, (uint8_t)0xE5U, (uint8_t)0x30U, (uint8_t)0x67U, (uint8_t)0x7FU, (uint8_t)0x0DU,
-    (uint8_t)0x97U, (uint8_t)0xD1U, (uint8_t)0x1DU, (uint8_t)0x49U, (uint8_t)0xF7U, (uint8_t)0xA8U,
-    (uint8_t)0x44U, (uint8_t)0x3DU, (uint8_t)0x08U, (uint8_t)0x22U, (uint8_t)0xE5U, (uint8_t)0x06U,
-    (uint8_t)0xA9U, (uint8_t)0xF4U, (uint8_t)0x61U, (uint8_t)0x4EU, (uint8_t)0x01U, (uint8_t)0x1EU,
-    (uint8_t)0x2AU, (uint8_t)0x94U, (uint8_t)0x83U, (uint8_t)0x8FU, (uint8_t)0xF8U, (uint8_t)0x8CU,
-    (uint8_t)0xD6U, (uint8_t)0x8CU, (uint8_t)0x8BU, (uint8_t)0xB7U, (uint8_t)0xC5U, (uint8_t)0xC6U,
-    (uint8_t)0x42U, (uint8_t)0x4CU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0x9EU, 0x1EU, 0xF1U, 0x6EU,
+    0x6FU, 0x52U, 0xC3U, 0x16U, 0x4DU, 0xF4U, 0xFBU, 0x79U, 0x30U, 0xE9U, 0xE4U, 0xE5U, 0x88U,
+    0x57U, 0xB6U, 0xACU, 0x7DU, 0x5FU, 0x42U, 0xD6U, 0x9FU, 0x6DU, 0x18U, 0x77U, 0x63U, 0xCFU,
+    0x1DU, 0x55U, 0x03U, 0x40U, 0x04U, 0x87U, 0xF5U, 0x5BU, 0xA5U, 0x7EU, 0x31U, 0xCCU, 0x7AU,
+    0x71U, 0x35U, 0xC8U, 0x86U, 0xEFU, 0xB4U, 0x31U, 0x8AU, 0xEDU, 0x6AU, 0x1EU, 0x01U, 0x2DU,
+    0x9EU, 0x68U, 0x32U, 0xA9U, 0x07U, 0x60U, 0x0AU, 0x91U, 0x81U, 0x30U, 0xC4U, 0x6DU, 0xC7U,
+    0x78U, 0xF9U, 0x71U, 0xADU, 0x00U, 0x38U, 0x09U, 0x29U, 0x99U, 0xA3U, 0x33U, 0xCBU, 0x8BU,
+    0x7AU, 0x1AU, 0x1DU, 0xB9U, 0x3DU, 0x71U, 0x40U, 0x00U, 0x3CU, 0x2AU, 0x4EU, 0xCEU, 0xA9U,
+    0xF9U, 0x8DU, 0x0AU, 0xCCU, 0x0AU, 0x82U, 0x91U, 0xCDU, 0xCEU, 0xC9U, 0x7DU, 0xCFU, 0x8EU,
+    0xC9U, 0xB5U, 0x5AU, 0x7FU, 0x88U, 0xA4U, 0x6BU, 0x4DU, 0xB5U, 0xA8U, 0x51U, 0xF4U, 0x41U,
+    0x82U, 0xE1U, 0xC6U, 0x8AU, 0x00U, 0x7EU, 0x5EU, 0x0DU, 0xD9U, 0x02U, 0x0BU, 0xFDU, 0x64U,
+    0xB6U, 0x45U, 0x03U, 0x6CU, 0x7AU, 0x4EU, 0x67U, 0x7DU, 0x2CU, 0x38U, 0x53U, 0x2AU, 0x3AU,
+    0x23U, 0xBAU, 0x44U, 0x42U, 0xCAU, 0xF5U, 0x3EU, 0xA6U, 0x3BU, 0xB4U, 0x54U, 0x32U, 0x9BU,
+    0x76U, 0x24U, 0xC8U, 0x91U, 0x7BU, 0xDDU, 0x64U, 0xB1U, 0xC0U, 0xFDU, 0x4CU, 0xB3U, 0x8EU,
+    0x8CU, 0x33U, 0x4CU, 0x70U, 0x1CU, 0x3AU, 0xCDU, 0xADU, 0x06U, 0x57U, 0xFCU, 0xCFU, 0xECU,
+    0x71U, 0x9BU, 0x1FU, 0x5CU, 0x3EU, 0x4EU, 0x46U, 0x04U, 0x1FU, 0x38U, 0x81U, 0x47U, 0xFBU,
+    0x4CU, 0xFDU, 0xB4U, 0x77U, 0xA5U, 0x24U, 0x71U, 0xF7U, 0xA9U, 0xA9U, 0x69U, 0x10U, 0xB8U,
+    0x55U, 0x32U, 0x2EU, 0xDBU, 0x63U, 0x40U, 0xD8U, 0xA0U, 0x0EU, 0xF0U, 0x92U, 0x35U, 0x05U,
+    0x11U, 0xE3U, 0x0AU, 0xBEU, 0xC1U, 0xFFU, 0xF9U, 0xE3U, 0xA2U, 0x6EU, 0x7FU, 0xB2U, 0x9FU,
+    0x8CU, 0x18U, 0x30U, 0x23U, 0xC3U, 0x58U, 0x7EU, 0x38U, 0xDAU, 0x00U, 0x77U, 0xD9U, 0xB4U,
+    0x76U, 0x3EU, 0x4EU, 0x4BU, 0x94U, 0xB2U, 0xBBU, 0xC1U, 0x94U, 0xC6U, 0x65U, 0x1EU, 0x77U,
+    0xCAU, 0xF9U, 0x92U, 0xEEU, 0xAAU, 0xC0U, 0x23U, 0x2AU, 0x28U, 0x1BU, 0xF6U, 0xB3U, 0xA7U,
+    0x39U, 0xC1U, 0x22U, 0x61U, 0x16U, 0x82U, 0x0AU, 0xE8U, 0xDBU, 0x58U, 0x47U, 0xA6U, 0x7CU,
+    0xBEU, 0xF9U, 0xC9U, 0x09U, 0x1BU, 0x46U, 0x2DU, 0x53U, 0x8CU, 0xD7U, 0x2BU, 0x03U, 0x74U,
+    0x6AU, 0xE7U, 0x7FU, 0x5EU, 0x62U, 0x29U, 0x2CU, 0x31U, 0x15U, 0x62U, 0xA8U, 0x46U, 0x50U,
+    0x5DU, 0xC8U, 0x2DU, 0xB8U, 0x54U, 0x33U, 0x8AU, 0xE4U, 0x9FU, 0x52U, 0x35U, 0xC9U, 0x5BU,
+    0x91U, 0x17U, 0x8CU, 0xCFU, 0x2DU, 0xD5U, 0xCAU, 0xCEU, 0xF4U, 0x03U, 0xECU, 0x9DU, 0x18U,
+    0x10U, 0xC6U, 0x27U, 0x2BU, 0x04U, 0x5BU, 0x3BU, 0x71U, 0xF9U, 0xDCU, 0x6BU, 0x80U, 0xD6U,
+    0x3FU, 0xDDU, 0x4AU, 0x8EU, 0x9AU, 0xDBU, 0x1EU, 0x69U, 0x62U, 0xA6U, 0x95U, 0x26U, 0xD4U,
+    0x31U, 0x61U, 0xC1U, 0xA4U, 0x1DU, 0x57U, 0x0DU, 0x79U, 0x38U, 0xDAU, 0xD4U, 0xA4U, 0x0EU,
+    0x32U, 0x9CU, 0xCFU, 0xF4U, 0x6AU, 0xAAU, 0x36U, 0xADU, 0x00U, 0x4CU, 0xF6U, 0x00U, 0xC8U,
+    0x38U, 0x1EU, 0x42U, 0x5AU, 0x31U, 0xD9U, 0x51U, 0xAEU, 0x64U, 0xFDU, 0xB2U, 0x3FU, 0xCEU,
+    0xC9U, 0x50U, 0x9DU, 0x43U, 0x68U, 0x7FU, 0xEBU, 0x69U, 0xEDU, 0xD1U, 0xCCU, 0x5EU, 0x0BU,
+    0x8CU, 0xC3U, 0xBDU, 0xF6U, 0x4BU, 0x10U, 0xEFU, 0x86U, 0xB6U, 0x31U, 0x42U, 0xA3U, 0xABU,
+    0x88U, 0x29U, 0x55U, 0x5BU, 0x2FU, 0x74U, 0x7CU, 0x93U, 0x26U, 0x65U, 0xCBU, 0x2CU, 0x0FU,
+    0x1CU, 0xC0U, 0x1BU, 0xD7U, 0x02U, 0x29U, 0x38U, 0x88U, 0x39U, 0xD2U, 0xAFU, 0x05U, 0xE4U,
+    0x54U, 0x50U, 0x4AU, 0xC7U, 0x8BU, 0x75U, 0x82U, 0x82U, 0x28U, 0x46U, 0xC0U, 0xBAU, 0x35U,
+    0xC3U, 0x5FU, 0x5CU, 0x59U, 0x16U, 0x0CU, 0xC0U, 0x46U, 0xFDU, 0x82U, 0x51U, 0x54U, 0x1FU,
+    0xC6U, 0x8CU, 0x9CU, 0x86U, 0xB0U, 0x22U, 0xBBU, 0x70U, 0x99U, 0x87U, 0x6AU, 0x46U, 0x0EU,
+    0x74U, 0x51U, 0xA8U, 0xA9U, 0x31U, 0x09U, 0x70U, 0x3FU, 0xEEU, 0x1CU, 0x21U, 0x7EU, 0x6CU,
+    0x38U, 0x26U, 0xE5U, 0x2CU, 0x51U, 0xAAU, 0x69U, 0x1EU, 0x0EU, 0x42U, 0x3CU, 0xFCU, 0x99U,
+    0xE9U, 0xE3U, 0x16U, 0x50U, 0xC1U, 0x21U, 0x7BU, 0x62U, 0x48U, 0x16U, 0xCDU, 0xADU, 0x9AU,
+    0x95U, 0xF9U, 0xD5U, 0xB8U, 0x01U, 0x94U, 0x88U, 0xD9U, 0xC0U, 0xA0U, 0xA1U, 0xFEU, 0x30U,
+    0x75U, 0xA5U, 0x77U, 0xE2U, 0x31U, 0x83U, 0xF8U, 0x1DU, 0x4AU, 0x3FU, 0x2FU, 0xA4U, 0x57U,
+    0x1EU, 0xFCU, 0x8CU, 0xE0U, 0xBAU, 0x8AU, 0x4FU, 0xE8U, 0xB6U, 0x85U, 0x5DU, 0xFEU, 0x72U,
+    0xB0U, 0xA6U, 0x6EU, 0xDEU, 0xD2U, 0xFBU, 0xABU, 0xFBU, 0xE5U, 0x8AU, 0x30U, 0xFAU, 0xFAU,
+    0xBEU, 0x1CU, 0x5DU, 0x71U, 0xA8U, 0x7EU, 0x2FU, 0x74U, 0x1EU, 0xF8U, 0xC1U, 0xFEU, 0x86U,
+    0xFEU, 0xA6U, 0xBBU, 0xFDU, 0xE5U, 0x30U, 0x67U, 0x7FU, 0x0DU, 0x97U, 0xD1U, 0x1DU, 0x49U,
+    0xF7U, 0xA8U, 0x44U, 0x3DU, 0x08U, 0x22U, 0xE5U, 0x06U, 0xA9U, 0xF4U, 0x61U, 0x4EU, 0x01U,
+    0x1EU, 0x2AU, 0x94U, 0x83U, 0x8FU, 0xF8U, 0x8CU, 0xD6U, 0x8CU, 0x8BU, 0xB7U, 0xC5U, 0xC6U,
+    0x42U, 0x4CU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 #if defined(__cplusplus)
diff --git a/include/internal/Hacl_K256_PrecompTable.h b/include/internal/Hacl_K256_PrecompTable.h
index 26bdfa1f..ff15f1c9 100644
--- a/include/internal/Hacl_K256_PrecompTable.h
+++ b/include/internal/Hacl_K256_PrecompTable.h
@@ -39,498 +39,378 @@ static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_basepoint_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)705178180786072U,
-    (uint64_t)3855836460717471U, (uint64_t)4089131105950716U, (uint64_t)3301581525494108U,
-    (uint64_t)133858670344668U, (uint64_t)2199641648059576U, (uint64_t)1278080618437060U,
-    (uint64_t)3959378566518708U, (uint64_t)3455034269351872U, (uint64_t)79417610544803U,
-    (uint64_t)1U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)1282049064345544U, (uint64_t)971732600440099U, (uint64_t)1014594595727339U,
-    (uint64_t)4392159187541980U, (uint64_t)268327875692285U, (uint64_t)2411661712280539U,
-    (uint64_t)1092576199280126U, (uint64_t)4328619610718051U, (uint64_t)3535440816471627U,
-    (uint64_t)95182251488556U, (uint64_t)1893725512243753U, (uint64_t)3619861457111820U,
-    (uint64_t)879374960417905U, (uint64_t)2868056058129113U, (uint64_t)273195291893682U,
-    (uint64_t)2044797305960112U, (uint64_t)2357106853933780U, (uint64_t)3563112438336058U,
-    (uint64_t)2430811541762558U, (uint64_t)106443809495428U, (uint64_t)2231357633909668U,
-    (uint64_t)3641705835951936U, (uint64_t)80642569314189U, (uint64_t)2254841882373268U,
-    (uint64_t)149848031966573U, (uint64_t)2304615661367764U, (uint64_t)2410957403736446U,
-    (uint64_t)2712754805859804U, (uint64_t)2440183877540536U, (uint64_t)99784623895865U,
-    (uint64_t)3667773127482758U, (uint64_t)1354899394473308U, (uint64_t)3636602998800808U,
-    (uint64_t)2709296679846364U, (uint64_t)7253362091963U, (uint64_t)3585950735562744U,
-    (uint64_t)935775991758415U, (uint64_t)4108078106735201U, (uint64_t)556081800336307U,
-    (uint64_t)229585977163057U, (uint64_t)4055594186679801U, (uint64_t)1767681004944933U,
-    (uint64_t)1432634922083242U, (uint64_t)534935602949197U, (uint64_t)251753159522567U,
-    (uint64_t)2846474078499321U, (uint64_t)4488649590348702U, (uint64_t)2437476916025038U,
-    (uint64_t)3040577412822874U, (uint64_t)79405234918614U, (uint64_t)3030621226551508U,
-    (uint64_t)2801117003929806U, (uint64_t)1642927515498422U, (uint64_t)2802725079726297U,
-    (uint64_t)8472780626107U, (uint64_t)866068070352655U, (uint64_t)188080768545106U,
-    (uint64_t)2152119998903058U, (uint64_t)3391239985029665U, (uint64_t)23820026013564U,
-    (uint64_t)2965064154891949U, (uint64_t)1846516097921398U, (uint64_t)4418379948133146U,
-    (uint64_t)3137755426942400U, (uint64_t)47705291301781U, (uint64_t)4278533051105665U,
-    (uint64_t)3453643211214931U, (uint64_t)3379734319145156U, (uint64_t)3762442192097039U,
-    (uint64_t)40243003528694U, (uint64_t)4063448994211201U, (uint64_t)5697015368785U,
-    (uint64_t)1006545411838613U, (uint64_t)4242291693755210U, (uint64_t)135184629190512U,
-    (uint64_t)264898689131035U, (uint64_t)611796474823597U, (uint64_t)3255382250029089U,
-    (uint64_t)3490429246984696U, (uint64_t)236558595864362U, (uint64_t)2055934691551704U,
-    (uint64_t)1487711670114502U, (uint64_t)1823930698221632U, (uint64_t)2130937287438472U,
-    (uint64_t)154610053389779U, (uint64_t)2746573287023216U, (uint64_t)2430987262221221U,
-    (uint64_t)1668741642878689U, (uint64_t)904982541243977U, (uint64_t)56087343124948U,
-    (uint64_t)393905062353536U, (uint64_t)412681877350188U, (uint64_t)3153602040979977U,
-    (uint64_t)4466820876224989U, (uint64_t)146579165617857U, (uint64_t)2628741216508991U,
-    (uint64_t)747994231529806U, (uint64_t)750506569317681U, (uint64_t)1887492790748779U,
-    (uint64_t)35259008682771U, (uint64_t)2085116434894208U, (uint64_t)543291398921711U,
-    (uint64_t)1144362007901552U, (uint64_t)679305136036846U, (uint64_t)141090902244489U,
-    (uint64_t)632480954474859U, (uint64_t)2384513102652591U, (uint64_t)2225529790159790U,
-    (uint64_t)692258664851625U, (uint64_t)198681843567699U, (uint64_t)2397092587228181U,
-    (uint64_t)145862822166614U, (uint64_t)196976540479452U, (uint64_t)3321831130141455U,
-    (uint64_t)69266673089832U, (uint64_t)4469644227342284U, (uint64_t)3899271145504796U,
-    (uint64_t)1261890974076660U, (uint64_t)525357673886694U, (uint64_t)182135997828583U,
-    (uint64_t)4292760618810332U, (uint64_t)3404186545541683U, (uint64_t)312297386688768U,
-    (uint64_t)204377466824608U, (uint64_t)230900767857952U, (uint64_t)3871485172339693U,
-    (uint64_t)779449329662955U, (uint64_t)978655822464694U, (uint64_t)2278252139594027U,
-    (uint64_t)104641527040382U, (uint64_t)3528840153625765U, (uint64_t)4484699080275273U,
-    (uint64_t)1463971951102316U, (uint64_t)4013910812844749U, (uint64_t)228915589433620U,
-    (uint64_t)1209641433482461U, (uint64_t)4043178788774759U, (uint64_t)3008668238856634U,
-    (uint64_t)1448425089071412U, (uint64_t)26269719725037U, (uint64_t)3330785027545223U,
-    (uint64_t)852657975349259U, (uint64_t)227245054466105U, (uint64_t)1534632353984777U,
-    (uint64_t)207715098574660U, (uint64_t)3209837527352280U, (uint64_t)4051688046309066U,
-    (uint64_t)3839009590725955U, (uint64_t)1321506437398842U, (uint64_t)68340219159928U,
-    (uint64_t)1806950276956275U, (uint64_t)3923908055275295U, (uint64_t)743963253393575U,
-    (uint64_t)42162407478783U, (uint64_t)261334584474610U, (uint64_t)3728224928885214U,
-    (uint64_t)4004701081842869U, (uint64_t)709043201644674U, (uint64_t)4267294249150171U,
-    (uint64_t)255540582975025U, (uint64_t)875490593722211U, (uint64_t)796393708218375U,
-    (uint64_t)14774425627956U, (uint64_t)1500040516752097U, (uint64_t)141076627721678U,
-    (uint64_t)2634539368480628U, (uint64_t)1106488853550103U, (uint64_t)2346231921151930U,
-    (uint64_t)897108283954283U, (uint64_t)64616679559843U, (uint64_t)400244949840943U,
-    (uint64_t)1731263826831733U, (uint64_t)1649996579904651U, (uint64_t)3643693449640761U,
-    (uint64_t)172543068638991U, (uint64_t)329537981097182U, (uint64_t)2029799860802869U,
-    (uint64_t)4377737515208862U, (uint64_t)29103311051334U, (uint64_t)265583594111499U,
-    (uint64_t)3798074876561255U, (uint64_t)184749333259352U, (uint64_t)3117395073661801U,
-    (uint64_t)3695784565008833U, (uint64_t)64282709896721U, (uint64_t)1618968913246422U,
-    (uint64_t)3185235128095257U, (uint64_t)3288745068118692U, (uint64_t)1963818603508782U,
-    (uint64_t)281054350739495U, (uint64_t)1658639050810346U, (uint64_t)3061097601679552U,
-    (uint64_t)3023781433263746U, (uint64_t)2770283391242475U, (uint64_t)144508864751908U,
-    (uint64_t)173576288079856U, (uint64_t)46114579547054U, (uint64_t)1679480127300211U,
-    (uint64_t)1683062051644007U, (uint64_t)117183826129323U, (uint64_t)1894068608117440U,
-    (uint64_t)3846899838975733U, (uint64_t)4289279019496192U, (uint64_t)176995887914031U,
-    (uint64_t)78074942938713U, (uint64_t)454207263265292U, (uint64_t)972683614054061U,
-    (uint64_t)808474205144361U, (uint64_t)942703935951735U, (uint64_t)134460241077887U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    705178180786072ULL, 3855836460717471ULL, 4089131105950716ULL, 3301581525494108ULL,
+    133858670344668ULL, 2199641648059576ULL, 1278080618437060ULL, 3959378566518708ULL,
+    3455034269351872ULL, 79417610544803ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1282049064345544ULL,
+    971732600440099ULL, 1014594595727339ULL, 4392159187541980ULL, 268327875692285ULL,
+    2411661712280539ULL, 1092576199280126ULL, 4328619610718051ULL, 3535440816471627ULL,
+    95182251488556ULL, 1893725512243753ULL, 3619861457111820ULL, 879374960417905ULL,
+    2868056058129113ULL, 273195291893682ULL, 2044797305960112ULL, 2357106853933780ULL,
+    3563112438336058ULL, 2430811541762558ULL, 106443809495428ULL, 2231357633909668ULL,
+    3641705835951936ULL, 80642569314189ULL, 2254841882373268ULL, 149848031966573ULL,
+    2304615661367764ULL, 2410957403736446ULL, 2712754805859804ULL, 2440183877540536ULL,
+    99784623895865ULL, 3667773127482758ULL, 1354899394473308ULL, 3636602998800808ULL,
+    2709296679846364ULL, 7253362091963ULL, 3585950735562744ULL, 935775991758415ULL,
+    4108078106735201ULL, 556081800336307ULL, 229585977163057ULL, 4055594186679801ULL,
+    1767681004944933ULL, 1432634922083242ULL, 534935602949197ULL, 251753159522567ULL,
+    2846474078499321ULL, 4488649590348702ULL, 2437476916025038ULL, 3040577412822874ULL,
+    79405234918614ULL, 3030621226551508ULL, 2801117003929806ULL, 1642927515498422ULL,
+    2802725079726297ULL, 8472780626107ULL, 866068070352655ULL, 188080768545106ULL,
+    2152119998903058ULL, 3391239985029665ULL, 23820026013564ULL, 2965064154891949ULL,
+    1846516097921398ULL, 4418379948133146ULL, 3137755426942400ULL, 47705291301781ULL,
+    4278533051105665ULL, 3453643211214931ULL, 3379734319145156ULL, 3762442192097039ULL,
+    40243003528694ULL, 4063448994211201ULL, 5697015368785ULL, 1006545411838613ULL,
+    4242291693755210ULL, 135184629190512ULL, 264898689131035ULL, 611796474823597ULL,
+    3255382250029089ULL, 3490429246984696ULL, 236558595864362ULL, 2055934691551704ULL,
+    1487711670114502ULL, 1823930698221632ULL, 2130937287438472ULL, 154610053389779ULL,
+    2746573287023216ULL, 2430987262221221ULL, 1668741642878689ULL, 904982541243977ULL,
+    56087343124948ULL, 393905062353536ULL, 412681877350188ULL, 3153602040979977ULL,
+    4466820876224989ULL, 146579165617857ULL, 2628741216508991ULL, 747994231529806ULL,
+    750506569317681ULL, 1887492790748779ULL, 35259008682771ULL, 2085116434894208ULL,
+    543291398921711ULL, 1144362007901552ULL, 679305136036846ULL, 141090902244489ULL,
+    632480954474859ULL, 2384513102652591ULL, 2225529790159790ULL, 692258664851625ULL,
+    198681843567699ULL, 2397092587228181ULL, 145862822166614ULL, 196976540479452ULL,
+    3321831130141455ULL, 69266673089832ULL, 4469644227342284ULL, 3899271145504796ULL,
+    1261890974076660ULL, 525357673886694ULL, 182135997828583ULL, 4292760618810332ULL,
+    3404186545541683ULL, 312297386688768ULL, 204377466824608ULL, 230900767857952ULL,
+    3871485172339693ULL, 779449329662955ULL, 978655822464694ULL, 2278252139594027ULL,
+    104641527040382ULL, 3528840153625765ULL, 4484699080275273ULL, 1463971951102316ULL,
+    4013910812844749ULL, 228915589433620ULL, 1209641433482461ULL, 4043178788774759ULL,
+    3008668238856634ULL, 1448425089071412ULL, 26269719725037ULL, 3330785027545223ULL,
+    852657975349259ULL, 227245054466105ULL, 1534632353984777ULL, 207715098574660ULL,
+    3209837527352280ULL, 4051688046309066ULL, 3839009590725955ULL, 1321506437398842ULL,
+    68340219159928ULL, 1806950276956275ULL, 3923908055275295ULL, 743963253393575ULL,
+    42162407478783ULL, 261334584474610ULL, 3728224928885214ULL, 4004701081842869ULL,
+    709043201644674ULL, 4267294249150171ULL, 255540582975025ULL, 875490593722211ULL,
+    796393708218375ULL, 14774425627956ULL, 1500040516752097ULL, 141076627721678ULL,
+    2634539368480628ULL, 1106488853550103ULL, 2346231921151930ULL, 897108283954283ULL,
+    64616679559843ULL, 400244949840943ULL, 1731263826831733ULL, 1649996579904651ULL,
+    3643693449640761ULL, 172543068638991ULL, 329537981097182ULL, 2029799860802869ULL,
+    4377737515208862ULL, 29103311051334ULL, 265583594111499ULL, 3798074876561255ULL,
+    184749333259352ULL, 3117395073661801ULL, 3695784565008833ULL, 64282709896721ULL,
+    1618968913246422ULL, 3185235128095257ULL, 3288745068118692ULL, 1963818603508782ULL,
+    281054350739495ULL, 1658639050810346ULL, 3061097601679552ULL, 3023781433263746ULL,
+    2770283391242475ULL, 144508864751908ULL, 173576288079856ULL, 46114579547054ULL,
+    1679480127300211ULL, 1683062051644007ULL, 117183826129323ULL, 1894068608117440ULL,
+    3846899838975733ULL, 4289279019496192ULL, 176995887914031ULL, 78074942938713ULL,
+    454207263265292ULL, 972683614054061ULL, 808474205144361ULL, 942703935951735ULL,
+    134460241077887ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_g_pow2_64_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)4496295042185355U,
-    (uint64_t)3125448202219451U, (uint64_t)1239608518490046U, (uint64_t)2687445637493112U,
-    (uint64_t)77979604880139U, (uint64_t)3360310474215011U, (uint64_t)1216410458165163U,
-    (uint64_t)177901593587973U, (uint64_t)3209978938104985U, (uint64_t)118285133003718U,
-    (uint64_t)434519962075150U, (uint64_t)1114612377498854U, (uint64_t)3488596944003813U,
-    (uint64_t)450716531072892U, (uint64_t)66044973203836U, (uint64_t)2822827191156652U,
-    (uint64_t)2417714248626059U, (uint64_t)2173117567943U, (uint64_t)961513119252459U,
-    (uint64_t)233852556538333U, (uint64_t)3014783730323962U, (uint64_t)2955192634004574U,
-    (uint64_t)580546524951282U, (uint64_t)2982973948711252U, (uint64_t)226295722018730U,
-    (uint64_t)26457116218543U, (uint64_t)3401523493637663U, (uint64_t)2597746825024790U,
-    (uint64_t)1789211180483113U, (uint64_t)155862365823427U, (uint64_t)4056806876632134U,
-    (uint64_t)1742291745730568U, (uint64_t)3527759000626890U, (uint64_t)3740578471192596U,
-    (uint64_t)177295097700537U, (uint64_t)1533961415657770U, (uint64_t)4305228982382487U,
-    (uint64_t)4069090871282711U, (uint64_t)4090877481646667U, (uint64_t)220939617041498U,
-    (uint64_t)2057548127959588U, (uint64_t)45185623103252U, (uint64_t)2871963270423449U,
-    (uint64_t)3312974792248749U, (uint64_t)8710601879528U, (uint64_t)570612225194540U,
-    (uint64_t)2045632925323972U, (uint64_t)1263913878297555U, (uint64_t)1294592284757719U,
-    (uint64_t)238067747295054U, (uint64_t)1576659948829386U, (uint64_t)2315159636629917U,
-    (uint64_t)3624867787891655U, (uint64_t)647628266663887U, (uint64_t)75788399640253U,
-    (uint64_t)710811707847797U, (uint64_t)130020650130128U, (uint64_t)1975045425972589U,
-    (uint64_t)136351545314094U, (uint64_t)229292031212337U, (uint64_t)1061471455264148U,
-    (uint64_t)3281312694184822U, (uint64_t)1692442293921797U, (uint64_t)4171008525509513U,
-    (uint64_t)275424696197549U, (uint64_t)1170296303921965U, (uint64_t)4154092952807735U,
-    (uint64_t)4371262070870741U, (uint64_t)835769811036496U, (uint64_t)275812646528189U,
-    (uint64_t)4006745785521764U, (uint64_t)1965172239781114U, (uint64_t)4121055644916429U,
-    (uint64_t)3578995380229569U, (uint64_t)169798870760022U, (uint64_t)1834234783016431U,
-    (uint64_t)3186919121688538U, (uint64_t)1894269993170652U, (uint64_t)868603832348691U,
-    (uint64_t)110978471368876U, (uint64_t)1659296605881532U, (uint64_t)3257830829309297U,
-    (uint64_t)3381509832701119U, (uint64_t)4016163121121296U, (uint64_t)265240263496294U,
-    (uint64_t)4411285343933251U, (uint64_t)728746770806400U, (uint64_t)1767819098558739U,
-    (uint64_t)3002081480892841U, (uint64_t)96312133241935U, (uint64_t)468184501392107U,
-    (uint64_t)2061529496271208U, (uint64_t)801565111628867U, (uint64_t)3380678576799273U,
-    (uint64_t)121814978170941U, (uint64_t)3340363319165433U, (uint64_t)2764604325746928U,
-    (uint64_t)4475755976431968U, (uint64_t)3678073419927081U, (uint64_t)237001357924061U,
-    (uint64_t)4110487014553450U, (uint64_t)442517757833404U, (uint64_t)3976758767423859U,
-    (uint64_t)2559863799262476U, (uint64_t)178144664279213U, (uint64_t)2488702171798051U,
-    (uint64_t)4292079598620208U, (uint64_t)1642918280217329U, (uint64_t)3694920319798108U,
-    (uint64_t)111735528281657U, (uint64_t)2904433967156033U, (uint64_t)4391518032143166U,
-    (uint64_t)3018885875516259U, (uint64_t)3730342681447122U, (uint64_t)10320273322750U,
-    (uint64_t)555845881555519U, (uint64_t)58355404017985U, (uint64_t)379009359053696U,
-    (uint64_t)450317203955503U, (uint64_t)271063299686173U, (uint64_t)910340241794202U,
-    (uint64_t)4145234574853890U, (uint64_t)2059755654702755U, (uint64_t)626530377112246U,
-    (uint64_t)188918989156857U, (uint64_t)3316657461542117U, (uint64_t)778033563170765U,
-    (uint64_t)3568562306532187U, (uint64_t)2888619469733481U, (uint64_t)4364919962337U,
-    (uint64_t)4095057288587059U, (uint64_t)2275461355379988U, (uint64_t)1507422995910897U,
-    (uint64_t)3737691697116252U, (uint64_t)28779913258578U, (uint64_t)131453301647952U,
-    (uint64_t)3613515597508469U, (uint64_t)2389606941441321U, (uint64_t)2135459302594806U,
-    (uint64_t)105517262484263U, (uint64_t)2973432939331401U, (uint64_t)3447096622477885U,
-    (uint64_t)684654106536844U, (uint64_t)2815198316729695U, (uint64_t)280303067216071U,
-    (uint64_t)1841014812927024U, (uint64_t)1181026273060917U, (uint64_t)4092989148457730U,
-    (uint64_t)1381045116206278U, (uint64_t)112475725893965U, (uint64_t)2309144740156686U,
-    (uint64_t)1558825847609352U, (uint64_t)2008068002046292U, (uint64_t)3153511625856423U,
-    (uint64_t)38469701427673U, (uint64_t)4240572315518056U, (uint64_t)2295170987320580U,
-    (uint64_t)187734093837094U, (uint64_t)301041528077172U, (uint64_t)234553141005715U,
-    (uint64_t)4170513699279606U, (uint64_t)1600132848196146U, (uint64_t)3149113064155689U,
-    (uint64_t)2733255352600949U, (uint64_t)144915931419495U, (uint64_t)1221012073888926U,
-    (uint64_t)4395668111081710U, (uint64_t)2464799161496070U, (uint64_t)3664256125241313U,
-    (uint64_t)239705368981290U, (uint64_t)1415181408539490U, (uint64_t)2551836620449074U,
-    (uint64_t)3003106895689578U, (uint64_t)968947218886924U, (uint64_t)270781532362673U,
-    (uint64_t)2905980714350372U, (uint64_t)3246927349288975U, (uint64_t)2653377642686974U,
-    (uint64_t)1577457093418263U, (uint64_t)279488238785848U, (uint64_t)568335962564552U,
-    (uint64_t)4251365041645758U, (uint64_t)1257832559776007U, (uint64_t)2424022444243863U,
-    (uint64_t)261166122046343U, (uint64_t)4399874608082116U, (uint64_t)640509987891568U,
-    (uint64_t)3119706885332220U, (uint64_t)1990185416694007U, (uint64_t)119390098529341U,
-    (uint64_t)220106534694050U, (uint64_t)937225880034895U, (uint64_t)656288151358882U,
-    (uint64_t)1766967254772100U, (uint64_t)197900790969750U, (uint64_t)2992539221608875U,
-    (uint64_t)3960297171111858U, (uint64_t)3499202002925081U, (uint64_t)1103060980924705U,
-    (uint64_t)13670895919578U, (uint64_t)430132744187721U, (uint64_t)1206771838050953U,
-    (uint64_t)2474749300167198U, (uint64_t)296299539510780U, (uint64_t)61565517686436U,
-    (uint64_t)752778559080573U, (uint64_t)3049015829565410U, (uint64_t)3538647632527371U,
-    (uint64_t)1640473028662032U, (uint64_t)182488721849306U, (uint64_t)1234378482161516U,
-    (uint64_t)3736205988606381U, (uint64_t)2814216844344487U, (uint64_t)3877249891529557U,
-    (uint64_t)51681412928433U, (uint64_t)4275336620301239U, (uint64_t)3084074032750651U,
-    (uint64_t)42732308350456U, (uint64_t)3648603591552229U, (uint64_t)142450621701603U,
-    (uint64_t)4020045475009854U, (uint64_t)1050293952073054U, (uint64_t)1974773673079851U,
-    (uint64_t)1815515638724020U, (uint64_t)104845375825434U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    4496295042185355ULL, 3125448202219451ULL, 1239608518490046ULL, 2687445637493112ULL,
+    77979604880139ULL, 3360310474215011ULL, 1216410458165163ULL, 177901593587973ULL,
+    3209978938104985ULL, 118285133003718ULL, 434519962075150ULL, 1114612377498854ULL,
+    3488596944003813ULL, 450716531072892ULL, 66044973203836ULL, 2822827191156652ULL,
+    2417714248626059ULL, 2173117567943ULL, 961513119252459ULL, 233852556538333ULL,
+    3014783730323962ULL, 2955192634004574ULL, 580546524951282ULL, 2982973948711252ULL,
+    226295722018730ULL, 26457116218543ULL, 3401523493637663ULL, 2597746825024790ULL,
+    1789211180483113ULL, 155862365823427ULL, 4056806876632134ULL, 1742291745730568ULL,
+    3527759000626890ULL, 3740578471192596ULL, 177295097700537ULL, 1533961415657770ULL,
+    4305228982382487ULL, 4069090871282711ULL, 4090877481646667ULL, 220939617041498ULL,
+    2057548127959588ULL, 45185623103252ULL, 2871963270423449ULL, 3312974792248749ULL,
+    8710601879528ULL, 570612225194540ULL, 2045632925323972ULL, 1263913878297555ULL,
+    1294592284757719ULL, 238067747295054ULL, 1576659948829386ULL, 2315159636629917ULL,
+    3624867787891655ULL, 647628266663887ULL, 75788399640253ULL, 710811707847797ULL,
+    130020650130128ULL, 1975045425972589ULL, 136351545314094ULL, 229292031212337ULL,
+    1061471455264148ULL, 3281312694184822ULL, 1692442293921797ULL, 4171008525509513ULL,
+    275424696197549ULL, 1170296303921965ULL, 4154092952807735ULL, 4371262070870741ULL,
+    835769811036496ULL, 275812646528189ULL, 4006745785521764ULL, 1965172239781114ULL,
+    4121055644916429ULL, 3578995380229569ULL, 169798870760022ULL, 1834234783016431ULL,
+    3186919121688538ULL, 1894269993170652ULL, 868603832348691ULL, 110978471368876ULL,
+    1659296605881532ULL, 3257830829309297ULL, 3381509832701119ULL, 4016163121121296ULL,
+    265240263496294ULL, 4411285343933251ULL, 728746770806400ULL, 1767819098558739ULL,
+    3002081480892841ULL, 96312133241935ULL, 468184501392107ULL, 2061529496271208ULL,
+    801565111628867ULL, 3380678576799273ULL, 121814978170941ULL, 3340363319165433ULL,
+    2764604325746928ULL, 4475755976431968ULL, 3678073419927081ULL, 237001357924061ULL,
+    4110487014553450ULL, 442517757833404ULL, 3976758767423859ULL, 2559863799262476ULL,
+    178144664279213ULL, 2488702171798051ULL, 4292079598620208ULL, 1642918280217329ULL,
+    3694920319798108ULL, 111735528281657ULL, 2904433967156033ULL, 4391518032143166ULL,
+    3018885875516259ULL, 3730342681447122ULL, 10320273322750ULL, 555845881555519ULL,
+    58355404017985ULL, 379009359053696ULL, 450317203955503ULL, 271063299686173ULL,
+    910340241794202ULL, 4145234574853890ULL, 2059755654702755ULL, 626530377112246ULL,
+    188918989156857ULL, 3316657461542117ULL, 778033563170765ULL, 3568562306532187ULL,
+    2888619469733481ULL, 4364919962337ULL, 4095057288587059ULL, 2275461355379988ULL,
+    1507422995910897ULL, 3737691697116252ULL, 28779913258578ULL, 131453301647952ULL,
+    3613515597508469ULL, 2389606941441321ULL, 2135459302594806ULL, 105517262484263ULL,
+    2973432939331401ULL, 3447096622477885ULL, 684654106536844ULL, 2815198316729695ULL,
+    280303067216071ULL, 1841014812927024ULL, 1181026273060917ULL, 4092989148457730ULL,
+    1381045116206278ULL, 112475725893965ULL, 2309144740156686ULL, 1558825847609352ULL,
+    2008068002046292ULL, 3153511625856423ULL, 38469701427673ULL, 4240572315518056ULL,
+    2295170987320580ULL, 187734093837094ULL, 301041528077172ULL, 234553141005715ULL,
+    4170513699279606ULL, 1600132848196146ULL, 3149113064155689ULL, 2733255352600949ULL,
+    144915931419495ULL, 1221012073888926ULL, 4395668111081710ULL, 2464799161496070ULL,
+    3664256125241313ULL, 239705368981290ULL, 1415181408539490ULL, 2551836620449074ULL,
+    3003106895689578ULL, 968947218886924ULL, 270781532362673ULL, 2905980714350372ULL,
+    3246927349288975ULL, 2653377642686974ULL, 1577457093418263ULL, 279488238785848ULL,
+    568335962564552ULL, 4251365041645758ULL, 1257832559776007ULL, 2424022444243863ULL,
+    261166122046343ULL, 4399874608082116ULL, 640509987891568ULL, 3119706885332220ULL,
+    1990185416694007ULL, 119390098529341ULL, 220106534694050ULL, 937225880034895ULL,
+    656288151358882ULL, 1766967254772100ULL, 197900790969750ULL, 2992539221608875ULL,
+    3960297171111858ULL, 3499202002925081ULL, 1103060980924705ULL, 13670895919578ULL,
+    430132744187721ULL, 1206771838050953ULL, 2474749300167198ULL, 296299539510780ULL,
+    61565517686436ULL, 752778559080573ULL, 3049015829565410ULL, 3538647632527371ULL,
+    1640473028662032ULL, 182488721849306ULL, 1234378482161516ULL, 3736205988606381ULL,
+    2814216844344487ULL, 3877249891529557ULL, 51681412928433ULL, 4275336620301239ULL,
+    3084074032750651ULL, 42732308350456ULL, 3648603591552229ULL, 142450621701603ULL,
+    4020045475009854ULL, 1050293952073054ULL, 1974773673079851ULL, 1815515638724020ULL,
+    104845375825434ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_g_pow2_128_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1277614565900951U,
-    (uint64_t)378671684419493U, (uint64_t)3176260448102880U, (uint64_t)1575691435565077U,
-    (uint64_t)167304528382180U, (uint64_t)2600787765776588U, (uint64_t)7497946149293U,
-    (uint64_t)2184272641272202U, (uint64_t)2200235265236628U, (uint64_t)265969268774814U,
-    (uint64_t)1913228635640715U, (uint64_t)2831959046949342U, (uint64_t)888030405442963U,
-    (uint64_t)1817092932985033U, (uint64_t)101515844997121U, (uint64_t)3309468394859588U,
-    (uint64_t)3965334773689948U, (uint64_t)1945272965790738U, (uint64_t)4450939211427964U,
-    (uint64_t)211349698782702U, (uint64_t)2085160302160079U, (uint64_t)212812506072603U,
-    (uint64_t)3646122434511764U, (uint64_t)1711405092320514U, (uint64_t)95160920508464U,
-    (uint64_t)1677683368518073U, (uint64_t)4384656939250953U, (uint64_t)3548591046529893U,
-    (uint64_t)1683233536091384U, (uint64_t)105919586159941U, (uint64_t)1941416002726455U,
-    (uint64_t)246264372248216U, (uint64_t)3063044110922228U, (uint64_t)3772292170415825U,
-    (uint64_t)222933374989815U, (uint64_t)2417211163452935U, (uint64_t)2018230365573200U,
-    (uint64_t)1985974538911047U, (uint64_t)1387197705332739U, (uint64_t)186400825584956U,
-    (uint64_t)2469330487750329U, (uint64_t)1291983813301638U, (uint64_t)333416733706302U,
-    (uint64_t)3413315564261070U, (uint64_t)189444777569683U, (uint64_t)1062005622360420U,
-    (uint64_t)1800197715938740U, (uint64_t)3693110992551647U, (uint64_t)626990328941945U,
-    (uint64_t)40998857100520U, (uint64_t)3921983552805085U, (uint64_t)1016632437340656U,
-    (uint64_t)4016615929950878U, (uint64_t)2682554586771281U, (uint64_t)7043555162389U,
-    (uint64_t)3333819830676567U, (uint64_t)4120091964944036U, (uint64_t)1960788263484015U,
-    (uint64_t)1642145656273304U, (uint64_t)252814075789128U, (uint64_t)3085777342821357U,
-    (uint64_t)4166637997604052U, (uint64_t)1339401689756469U, (uint64_t)845938529607551U,
-    (uint64_t)223351828189283U, (uint64_t)1148648705186890U, (uint64_t)1230525014760605U,
-    (uint64_t)1869739475126720U, (uint64_t)4193966261205530U, (uint64_t)175684010336013U,
-    (uint64_t)4476719358931508U, (uint64_t)4209547487457638U, (uint64_t)2197536411673724U,
-    (uint64_t)3010838433412303U, (uint64_t)169318997251483U, (uint64_t)49493868302162U,
-    (uint64_t)3594601099078584U, (uint64_t)3662420905445942U, (uint64_t)3606544932233685U,
-    (uint64_t)270643652662165U, (uint64_t)180681786228544U, (uint64_t)2095882682308564U,
-    (uint64_t)813484483841391U, (uint64_t)1622665392824698U, (uint64_t)113821770225137U,
-    (uint64_t)3075432444115417U, (uint64_t)716502989978722U, (uint64_t)2304779892217245U,
-    (uint64_t)1760144151770127U, (uint64_t)235719156963938U, (uint64_t)3180013070471143U,
-    (uint64_t)1331027634540579U, (uint64_t)552273022992392U, (uint64_t)2858693077461887U,
-    (uint64_t)197914407731510U, (uint64_t)187252310910959U, (uint64_t)4160637171377125U,
-    (uint64_t)3225059526713298U, (uint64_t)2574558217383978U, (uint64_t)249695600622489U,
-    (uint64_t)364988742814327U, (uint64_t)4245298536326258U, (uint64_t)1812464706589342U,
-    (uint64_t)2734857123772998U, (uint64_t)120105577124628U, (uint64_t)160179251271109U,
-    (uint64_t)3604555733307834U, (uint64_t)150380003195715U, (uint64_t)1574304909935121U,
-    (uint64_t)142190285600761U, (uint64_t)1835385847725651U, (uint64_t)3168087139615901U,
-    (uint64_t)3201434861713736U, (uint64_t)741757984537760U, (uint64_t)163585009419543U,
-    (uint64_t)3837997981109783U, (uint64_t)3771946407870997U, (uint64_t)2867641360295452U,
-    (uint64_t)3097548691501578U, (uint64_t)124624912142104U, (uint64_t)2729896088769328U,
-    (uint64_t)1087786827035225U, (uint64_t)3934000813818614U, (uint64_t)1176792318645055U,
-    (uint64_t)125311882169270U, (uint64_t)3530709439299502U, (uint64_t)1561477829834527U,
-    (uint64_t)3927894570196761U, (uint64_t)3957765307669212U, (uint64_t)105720519513730U,
-    (uint64_t)3758969845816997U, (uint64_t)2738320452287300U, (uint64_t)2380753632109507U,
-    (uint64_t)2762090901149075U, (uint64_t)123455059136515U, (uint64_t)4222807813169807U,
-    (uint64_t)118064783651432U, (uint64_t)2877694712254934U, (uint64_t)3535027426396448U,
-    (uint64_t)100175663703417U, (uint64_t)3287921121213155U, (uint64_t)4497246481824206U,
-    (uint64_t)1960809949007025U, (uint64_t)3236854264159102U, (uint64_t)35028112623717U,
-    (uint64_t)338838627913273U, (uint64_t)2827531947914645U, (uint64_t)4231826783810670U,
-    (uint64_t)1082490106100389U, (uint64_t)13267544387448U, (uint64_t)4249975884259105U,
-    (uint64_t)2844862161652484U, (uint64_t)262742197948971U, (uint64_t)3525653802457116U,
-    (uint64_t)269963889261701U, (uint64_t)3690062482117102U, (uint64_t)675413453822147U,
-    (uint64_t)2170937868437574U, (uint64_t)2367632187022010U, (uint64_t)214032802409445U,
-    (uint64_t)2054007379612477U, (uint64_t)3558050826739009U, (uint64_t)266827184752634U,
-    (uint64_t)1946520293291195U, (uint64_t)238087872386556U, (uint64_t)490056555385700U,
-    (uint64_t)794405769357386U, (uint64_t)3886901294859702U, (uint64_t)3120414548626348U,
-    (uint64_t)84316625221136U, (uint64_t)223073962531835U, (uint64_t)4280846460577631U,
-    (uint64_t)344296282849308U, (uint64_t)3522116652699457U, (uint64_t)171817232053075U,
-    (uint64_t)3296636283062273U, (uint64_t)3587303364425579U, (uint64_t)1033485783633331U,
-    (uint64_t)3686984130812906U, (uint64_t)268290803650477U, (uint64_t)2803988215834467U,
-    (uint64_t)3821246410529720U, (uint64_t)1077722388925870U, (uint64_t)4187137036866164U,
-    (uint64_t)104696540795905U, (uint64_t)998770003854764U, (uint64_t)3960768137535019U,
-    (uint64_t)4293792474919135U, (uint64_t)3251297981727034U, (uint64_t)192479028790101U,
-    (uint64_t)1175880869349935U, (uint64_t)3506949259311937U, (uint64_t)2161711516160714U,
-    (uint64_t)2506820922270187U, (uint64_t)131002200661047U, (uint64_t)3532399477339994U,
-    (uint64_t)2515815721228719U, (uint64_t)4274974119021502U, (uint64_t)265752394510924U,
-    (uint64_t)163144272153395U, (uint64_t)2824260010502991U, (uint64_t)517077012665142U,
-    (uint64_t)602987073882924U, (uint64_t)2939630061751780U, (uint64_t)59211609557440U,
-    (uint64_t)963423614549333U, (uint64_t)495476232754434U, (uint64_t)94274496109103U,
-    (uint64_t)2245136222990187U, (uint64_t)185414764872288U, (uint64_t)2266067668609289U,
-    (uint64_t)3873978896235927U, (uint64_t)4428283513152105U, (uint64_t)3881481480259312U,
-    (uint64_t)207746202010862U, (uint64_t)1609437858011364U, (uint64_t)477585758421515U,
-    (uint64_t)3850430788664649U, (uint64_t)2682299074459173U, (uint64_t)149439089751274U,
-    (uint64_t)3665760243877698U, (uint64_t)1356661512658931U, (uint64_t)1675903262368322U,
-    (uint64_t)3355649228050892U, (uint64_t)99772108898412U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    1277614565900951ULL, 378671684419493ULL, 3176260448102880ULL, 1575691435565077ULL,
+    167304528382180ULL, 2600787765776588ULL, 7497946149293ULL, 2184272641272202ULL,
+    2200235265236628ULL, 265969268774814ULL, 1913228635640715ULL, 2831959046949342ULL,
+    888030405442963ULL, 1817092932985033ULL, 101515844997121ULL, 3309468394859588ULL,
+    3965334773689948ULL, 1945272965790738ULL, 4450939211427964ULL, 211349698782702ULL,
+    2085160302160079ULL, 212812506072603ULL, 3646122434511764ULL, 1711405092320514ULL,
+    95160920508464ULL, 1677683368518073ULL, 4384656939250953ULL, 3548591046529893ULL,
+    1683233536091384ULL, 105919586159941ULL, 1941416002726455ULL, 246264372248216ULL,
+    3063044110922228ULL, 3772292170415825ULL, 222933374989815ULL, 2417211163452935ULL,
+    2018230365573200ULL, 1985974538911047ULL, 1387197705332739ULL, 186400825584956ULL,
+    2469330487750329ULL, 1291983813301638ULL, 333416733706302ULL, 3413315564261070ULL,
+    189444777569683ULL, 1062005622360420ULL, 1800197715938740ULL, 3693110992551647ULL,
+    626990328941945ULL, 40998857100520ULL, 3921983552805085ULL, 1016632437340656ULL,
+    4016615929950878ULL, 2682554586771281ULL, 7043555162389ULL, 3333819830676567ULL,
+    4120091964944036ULL, 1960788263484015ULL, 1642145656273304ULL, 252814075789128ULL,
+    3085777342821357ULL, 4166637997604052ULL, 1339401689756469ULL, 845938529607551ULL,
+    223351828189283ULL, 1148648705186890ULL, 1230525014760605ULL, 1869739475126720ULL,
+    4193966261205530ULL, 175684010336013ULL, 4476719358931508ULL, 4209547487457638ULL,
+    2197536411673724ULL, 3010838433412303ULL, 169318997251483ULL, 49493868302162ULL,
+    3594601099078584ULL, 3662420905445942ULL, 3606544932233685ULL, 270643652662165ULL,
+    180681786228544ULL, 2095882682308564ULL, 813484483841391ULL, 1622665392824698ULL,
+    113821770225137ULL, 3075432444115417ULL, 716502989978722ULL, 2304779892217245ULL,
+    1760144151770127ULL, 235719156963938ULL, 3180013070471143ULL, 1331027634540579ULL,
+    552273022992392ULL, 2858693077461887ULL, 197914407731510ULL, 187252310910959ULL,
+    4160637171377125ULL, 3225059526713298ULL, 2574558217383978ULL, 249695600622489ULL,
+    364988742814327ULL, 4245298536326258ULL, 1812464706589342ULL, 2734857123772998ULL,
+    120105577124628ULL, 160179251271109ULL, 3604555733307834ULL, 150380003195715ULL,
+    1574304909935121ULL, 142190285600761ULL, 1835385847725651ULL, 3168087139615901ULL,
+    3201434861713736ULL, 741757984537760ULL, 163585009419543ULL, 3837997981109783ULL,
+    3771946407870997ULL, 2867641360295452ULL, 3097548691501578ULL, 124624912142104ULL,
+    2729896088769328ULL, 1087786827035225ULL, 3934000813818614ULL, 1176792318645055ULL,
+    125311882169270ULL, 3530709439299502ULL, 1561477829834527ULL, 3927894570196761ULL,
+    3957765307669212ULL, 105720519513730ULL, 3758969845816997ULL, 2738320452287300ULL,
+    2380753632109507ULL, 2762090901149075ULL, 123455059136515ULL, 4222807813169807ULL,
+    118064783651432ULL, 2877694712254934ULL, 3535027426396448ULL, 100175663703417ULL,
+    3287921121213155ULL, 4497246481824206ULL, 1960809949007025ULL, 3236854264159102ULL,
+    35028112623717ULL, 338838627913273ULL, 2827531947914645ULL, 4231826783810670ULL,
+    1082490106100389ULL, 13267544387448ULL, 4249975884259105ULL, 2844862161652484ULL,
+    262742197948971ULL, 3525653802457116ULL, 269963889261701ULL, 3690062482117102ULL,
+    675413453822147ULL, 2170937868437574ULL, 2367632187022010ULL, 214032802409445ULL,
+    2054007379612477ULL, 3558050826739009ULL, 266827184752634ULL, 1946520293291195ULL,
+    238087872386556ULL, 490056555385700ULL, 794405769357386ULL, 3886901294859702ULL,
+    3120414548626348ULL, 84316625221136ULL, 223073962531835ULL, 4280846460577631ULL,
+    344296282849308ULL, 3522116652699457ULL, 171817232053075ULL, 3296636283062273ULL,
+    3587303364425579ULL, 1033485783633331ULL, 3686984130812906ULL, 268290803650477ULL,
+    2803988215834467ULL, 3821246410529720ULL, 1077722388925870ULL, 4187137036866164ULL,
+    104696540795905ULL, 998770003854764ULL, 3960768137535019ULL, 4293792474919135ULL,
+    3251297981727034ULL, 192479028790101ULL, 1175880869349935ULL, 3506949259311937ULL,
+    2161711516160714ULL, 2506820922270187ULL, 131002200661047ULL, 3532399477339994ULL,
+    2515815721228719ULL, 4274974119021502ULL, 265752394510924ULL, 163144272153395ULL,
+    2824260010502991ULL, 517077012665142ULL, 602987073882924ULL, 2939630061751780ULL,
+    59211609557440ULL, 963423614549333ULL, 495476232754434ULL, 94274496109103ULL,
+    2245136222990187ULL, 185414764872288ULL, 2266067668609289ULL, 3873978896235927ULL,
+    4428283513152105ULL, 3881481480259312ULL, 207746202010862ULL, 1609437858011364ULL,
+    477585758421515ULL, 3850430788664649ULL, 2682299074459173ULL, 149439089751274ULL,
+    3665760243877698ULL, 1356661512658931ULL, 1675903262368322ULL, 3355649228050892ULL,
+    99772108898412ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_g_pow2_192_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)34056422761564U,
-    (uint64_t)3315864838337811U, (uint64_t)3797032336888745U, (uint64_t)2580641850480806U,
-    (uint64_t)208048944042500U, (uint64_t)1233795288689421U, (uint64_t)1048795233382631U,
-    (uint64_t)646545158071530U, (uint64_t)1816025742137285U, (uint64_t)12245672982162U,
-    (uint64_t)2119364213800870U, (uint64_t)2034960311715107U, (uint64_t)3172697815804487U,
-    (uint64_t)4185144850224160U, (uint64_t)2792055915674U, (uint64_t)795534452139321U,
-    (uint64_t)3647836177838185U, (uint64_t)2681403398797991U, (uint64_t)3149264270306207U,
-    (uint64_t)278704080615511U, (uint64_t)2752552368344718U, (uint64_t)1363840972378818U,
-    (uint64_t)1877521512083293U, (uint64_t)1862111388059470U, (uint64_t)36200324115014U,
-    (uint64_t)4183622899327217U, (uint64_t)747381675363076U, (uint64_t)2772916395314624U,
-    (uint64_t)833767013119965U, (uint64_t)246274452928088U, (uint64_t)1526238021297781U,
-    (uint64_t)3327534966022747U, (uint64_t)1169012581910517U, (uint64_t)4430894603030025U,
-    (uint64_t)149242742442115U, (uint64_t)1002569704307172U, (uint64_t)2763252093432365U,
-    (uint64_t)3037748497732938U, (uint64_t)2329811173939457U, (uint64_t)270769113180752U,
-    (uint64_t)4344092461623432U, (uint64_t)892200524589382U, (uint64_t)2511418516713970U,
-    (uint64_t)103575031265398U, (uint64_t)183736033430252U, (uint64_t)583003071257308U,
-    (uint64_t)3357167344738425U, (uint64_t)4038099763242651U, (uint64_t)1776250620957255U,
-    (uint64_t)51334115864192U, (uint64_t)2616405698969611U, (uint64_t)1196364755910565U,
-    (uint64_t)3135228056210500U, (uint64_t)533729417611761U, (uint64_t)86564351229326U,
-    (uint64_t)98936129527281U, (uint64_t)4425305036630677U, (uint64_t)2980296390253408U,
-    (uint64_t)2487091677325739U, (uint64_t)10501977234280U, (uint64_t)1805646499831077U,
-    (uint64_t)3120615962395477U, (uint64_t)3634629685307533U, (uint64_t)3009632755291436U,
-    (uint64_t)16794051906523U, (uint64_t)2465481597883214U, (uint64_t)211492787490403U,
-    (uint64_t)1120942867046103U, (uint64_t)486438308572108U, (uint64_t)76058986271771U,
-    (uint64_t)2435216584587357U, (uint64_t)3076359381968283U, (uint64_t)1071594491489655U,
-    (uint64_t)3148707450339154U, (uint64_t)249332205737851U, (uint64_t)4171051176626809U,
-    (uint64_t)3165176227956388U, (uint64_t)2400901591835233U, (uint64_t)1435783621333022U,
-    (uint64_t)20312753440321U, (uint64_t)1767293887448005U, (uint64_t)685150647587522U,
-    (uint64_t)2957187934449906U, (uint64_t)382661319140439U, (uint64_t)177583591139601U,
-    (uint64_t)2083572648630743U, (uint64_t)1083410277889419U, (uint64_t)4267902097868310U,
-    (uint64_t)679989918385081U, (uint64_t)123155311554032U, (uint64_t)2830267662472020U,
-    (uint64_t)4476040509735924U, (uint64_t)526697201585144U, (uint64_t)3465306430573135U,
-    (uint64_t)2296616218591U, (uint64_t)1270626872734279U, (uint64_t)1049740198790549U,
-    (uint64_t)4197567214843444U, (uint64_t)1962225231320591U, (uint64_t)186125026796856U,
-    (uint64_t)737027567341142U, (uint64_t)4364616098174U, (uint64_t)3618884818756660U,
-    (uint64_t)1236837563717668U, (uint64_t)162873772439548U, (uint64_t)3081542470065122U,
-    (uint64_t)910331750163991U, (uint64_t)2110498143869827U, (uint64_t)3208473121852657U,
-    (uint64_t)94687786224509U, (uint64_t)4113309027567819U, (uint64_t)4272179438357536U,
-    (uint64_t)1857418654076140U, (uint64_t)1672678841741004U, (uint64_t)94482160248411U,
-    (uint64_t)1928652436799020U, (uint64_t)1750866462381515U, (uint64_t)4048060485672270U,
-    (uint64_t)4006680581258587U, (uint64_t)14850434761312U, (uint64_t)2828734997081648U,
-    (uint64_t)1975589525873972U, (uint64_t)3724347738416009U, (uint64_t)597163266689736U,
-    (uint64_t)14568362978551U, (uint64_t)2203865455839744U, (uint64_t)2237034958890595U,
-    (uint64_t)1863572986731818U, (uint64_t)2329774560279041U, (uint64_t)245105447642201U,
-    (uint64_t)2179697447864822U, (uint64_t)1769609498189882U, (uint64_t)1916950746430931U,
-    (uint64_t)847019613787312U, (uint64_t)163210606565100U, (uint64_t)3658248417400062U,
-    (uint64_t)717138296045881U, (uint64_t)42531212306121U, (uint64_t)1040915917097532U,
-    (uint64_t)77364489101310U, (uint64_t)539253504015590U, (uint64_t)732690726289841U,
-    (uint64_t)3401622034697806U, (uint64_t)2864593278358513U, (uint64_t)142611941887017U,
-    (uint64_t)536364617506702U, (uint64_t)845071859974284U, (uint64_t)4461787417089721U,
-    (uint64_t)2633811871939723U, (uint64_t)113619731985610U, (uint64_t)2535870015489566U,
-    (uint64_t)2146224665077830U, (uint64_t)2593725534662047U, (uint64_t)1332349537449710U,
-    (uint64_t)153375287068096U, (uint64_t)3689977177165276U, (uint64_t)3631865615314120U,
-    (uint64_t)184644878348929U, (uint64_t)2220481726602813U, (uint64_t)204002551273091U,
-    (uint64_t)3022560051766785U, (uint64_t)3125940458001213U, (uint64_t)4258299086906325U,
-    (uint64_t)1072471915162030U, (uint64_t)2797562724530U, (uint64_t)3974298156223059U,
-    (uint64_t)1624778551002554U, (uint64_t)3490703864485971U, (uint64_t)2533877484212458U,
-    (uint64_t)176107782538555U, (uint64_t)4275987398312137U, (uint64_t)4397120757693722U,
-    (uint64_t)3001292763847390U, (uint64_t)1556490837621310U, (uint64_t)70442953037671U,
-    (uint64_t)1558915972545974U, (uint64_t)744724505252845U, (uint64_t)2697230204313363U,
-    (uint64_t)3495671924212144U, (uint64_t)95744296878924U, (uint64_t)1508848630912047U,
-    (uint64_t)4163599342850968U, (uint64_t)1234988733935901U, (uint64_t)3789722472212706U,
-    (uint64_t)219522007052022U, (uint64_t)2106597506701262U, (uint64_t)3231115099832239U,
-    (uint64_t)1296436890593905U, (uint64_t)1016795619587656U, (uint64_t)231150565033388U,
-    (uint64_t)4205501688458754U, (uint64_t)2271569140386062U, (uint64_t)3421769599058157U,
-    (uint64_t)4118408853784554U, (uint64_t)276709341465173U, (uint64_t)2681340614854362U,
-    (uint64_t)2514413365628788U, (uint64_t)62294545067341U, (uint64_t)277610220069365U,
-    (uint64_t)252463150123799U, (uint64_t)2547353593759399U, (uint64_t)1857438147448607U,
-    (uint64_t)2964811969681256U, (uint64_t)3303706463835387U, (uint64_t)248936570980853U,
-    (uint64_t)3208982702478009U, (uint64_t)2518671051730787U, (uint64_t)727433853033835U,
-    (uint64_t)1290389308223446U, (uint64_t)220742793981035U, (uint64_t)3851225361654709U,
-    (uint64_t)2307489307934273U, (uint64_t)1151710489948266U, (uint64_t)289775285210516U,
-    (uint64_t)222685002397295U, (uint64_t)1222117478082108U, (uint64_t)2822029169395728U,
-    (uint64_t)1172146252219882U, (uint64_t)2626108105510259U, (uint64_t)209803527887167U,
-    (uint64_t)2718831919953281U, (uint64_t)4348638387588593U, (uint64_t)3761438313263183U,
-    (uint64_t)13169515318095U, (uint64_t)212893621229476U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    34056422761564ULL, 3315864838337811ULL, 3797032336888745ULL, 2580641850480806ULL,
+    208048944042500ULL, 1233795288689421ULL, 1048795233382631ULL, 646545158071530ULL,
+    1816025742137285ULL, 12245672982162ULL, 2119364213800870ULL, 2034960311715107ULL,
+    3172697815804487ULL, 4185144850224160ULL, 2792055915674ULL, 795534452139321ULL,
+    3647836177838185ULL, 2681403398797991ULL, 3149264270306207ULL, 278704080615511ULL,
+    2752552368344718ULL, 1363840972378818ULL, 1877521512083293ULL, 1862111388059470ULL,
+    36200324115014ULL, 4183622899327217ULL, 747381675363076ULL, 2772916395314624ULL,
+    833767013119965ULL, 246274452928088ULL, 1526238021297781ULL, 3327534966022747ULL,
+    1169012581910517ULL, 4430894603030025ULL, 149242742442115ULL, 1002569704307172ULL,
+    2763252093432365ULL, 3037748497732938ULL, 2329811173939457ULL, 270769113180752ULL,
+    4344092461623432ULL, 892200524589382ULL, 2511418516713970ULL, 103575031265398ULL,
+    183736033430252ULL, 583003071257308ULL, 3357167344738425ULL, 4038099763242651ULL,
+    1776250620957255ULL, 51334115864192ULL, 2616405698969611ULL, 1196364755910565ULL,
+    3135228056210500ULL, 533729417611761ULL, 86564351229326ULL, 98936129527281ULL,
+    4425305036630677ULL, 2980296390253408ULL, 2487091677325739ULL, 10501977234280ULL,
+    1805646499831077ULL, 3120615962395477ULL, 3634629685307533ULL, 3009632755291436ULL,
+    16794051906523ULL, 2465481597883214ULL, 211492787490403ULL, 1120942867046103ULL,
+    486438308572108ULL, 76058986271771ULL, 2435216584587357ULL, 3076359381968283ULL,
+    1071594491489655ULL, 3148707450339154ULL, 249332205737851ULL, 4171051176626809ULL,
+    3165176227956388ULL, 2400901591835233ULL, 1435783621333022ULL, 20312753440321ULL,
+    1767293887448005ULL, 685150647587522ULL, 2957187934449906ULL, 382661319140439ULL,
+    177583591139601ULL, 2083572648630743ULL, 1083410277889419ULL, 4267902097868310ULL,
+    679989918385081ULL, 123155311554032ULL, 2830267662472020ULL, 4476040509735924ULL,
+    526697201585144ULL, 3465306430573135ULL, 2296616218591ULL, 1270626872734279ULL,
+    1049740198790549ULL, 4197567214843444ULL, 1962225231320591ULL, 186125026796856ULL,
+    737027567341142ULL, 4364616098174ULL, 3618884818756660ULL, 1236837563717668ULL,
+    162873772439548ULL, 3081542470065122ULL, 910331750163991ULL, 2110498143869827ULL,
+    3208473121852657ULL, 94687786224509ULL, 4113309027567819ULL, 4272179438357536ULL,
+    1857418654076140ULL, 1672678841741004ULL, 94482160248411ULL, 1928652436799020ULL,
+    1750866462381515ULL, 4048060485672270ULL, 4006680581258587ULL, 14850434761312ULL,
+    2828734997081648ULL, 1975589525873972ULL, 3724347738416009ULL, 597163266689736ULL,
+    14568362978551ULL, 2203865455839744ULL, 2237034958890595ULL, 1863572986731818ULL,
+    2329774560279041ULL, 245105447642201ULL, 2179697447864822ULL, 1769609498189882ULL,
+    1916950746430931ULL, 847019613787312ULL, 163210606565100ULL, 3658248417400062ULL,
+    717138296045881ULL, 42531212306121ULL, 1040915917097532ULL, 77364489101310ULL,
+    539253504015590ULL, 732690726289841ULL, 3401622034697806ULL, 2864593278358513ULL,
+    142611941887017ULL, 536364617506702ULL, 845071859974284ULL, 4461787417089721ULL,
+    2633811871939723ULL, 113619731985610ULL, 2535870015489566ULL, 2146224665077830ULL,
+    2593725534662047ULL, 1332349537449710ULL, 153375287068096ULL, 3689977177165276ULL,
+    3631865615314120ULL, 184644878348929ULL, 2220481726602813ULL, 204002551273091ULL,
+    3022560051766785ULL, 3125940458001213ULL, 4258299086906325ULL, 1072471915162030ULL,
+    2797562724530ULL, 3974298156223059ULL, 1624778551002554ULL, 3490703864485971ULL,
+    2533877484212458ULL, 176107782538555ULL, 4275987398312137ULL, 4397120757693722ULL,
+    3001292763847390ULL, 1556490837621310ULL, 70442953037671ULL, 1558915972545974ULL,
+    744724505252845ULL, 2697230204313363ULL, 3495671924212144ULL, 95744296878924ULL,
+    1508848630912047ULL, 4163599342850968ULL, 1234988733935901ULL, 3789722472212706ULL,
+    219522007052022ULL, 2106597506701262ULL, 3231115099832239ULL, 1296436890593905ULL,
+    1016795619587656ULL, 231150565033388ULL, 4205501688458754ULL, 2271569140386062ULL,
+    3421769599058157ULL, 4118408853784554ULL, 276709341465173ULL, 2681340614854362ULL,
+    2514413365628788ULL, 62294545067341ULL, 277610220069365ULL, 252463150123799ULL,
+    2547353593759399ULL, 1857438147448607ULL, 2964811969681256ULL, 3303706463835387ULL,
+    248936570980853ULL, 3208982702478009ULL, 2518671051730787ULL, 727433853033835ULL,
+    1290389308223446ULL, 220742793981035ULL, 3851225361654709ULL, 2307489307934273ULL,
+    1151710489948266ULL, 289775285210516ULL, 222685002397295ULL, 1222117478082108ULL,
+    2822029169395728ULL, 1172146252219882ULL, 2626108105510259ULL, 209803527887167ULL,
+    2718831919953281ULL, 4348638387588593ULL, 3761438313263183ULL, 13169515318095ULL,
+    212893621229476ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_basepoint_table_w5[480U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)705178180786072U,
-    (uint64_t)3855836460717471U, (uint64_t)4089131105950716U, (uint64_t)3301581525494108U,
-    (uint64_t)133858670344668U, (uint64_t)2199641648059576U, (uint64_t)1278080618437060U,
-    (uint64_t)3959378566518708U, (uint64_t)3455034269351872U, (uint64_t)79417610544803U,
-    (uint64_t)1U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)1282049064345544U, (uint64_t)971732600440099U, (uint64_t)1014594595727339U,
-    (uint64_t)4392159187541980U, (uint64_t)268327875692285U, (uint64_t)2411661712280539U,
-    (uint64_t)1092576199280126U, (uint64_t)4328619610718051U, (uint64_t)3535440816471627U,
-    (uint64_t)95182251488556U, (uint64_t)1893725512243753U, (uint64_t)3619861457111820U,
-    (uint64_t)879374960417905U, (uint64_t)2868056058129113U, (uint64_t)273195291893682U,
-    (uint64_t)2044797305960112U, (uint64_t)2357106853933780U, (uint64_t)3563112438336058U,
-    (uint64_t)2430811541762558U, (uint64_t)106443809495428U, (uint64_t)2231357633909668U,
-    (uint64_t)3641705835951936U, (uint64_t)80642569314189U, (uint64_t)2254841882373268U,
-    (uint64_t)149848031966573U, (uint64_t)2304615661367764U, (uint64_t)2410957403736446U,
-    (uint64_t)2712754805859804U, (uint64_t)2440183877540536U, (uint64_t)99784623895865U,
-    (uint64_t)3667773127482758U, (uint64_t)1354899394473308U, (uint64_t)3636602998800808U,
-    (uint64_t)2709296679846364U, (uint64_t)7253362091963U, (uint64_t)3585950735562744U,
-    (uint64_t)935775991758415U, (uint64_t)4108078106735201U, (uint64_t)556081800336307U,
-    (uint64_t)229585977163057U, (uint64_t)4055594186679801U, (uint64_t)1767681004944933U,
-    (uint64_t)1432634922083242U, (uint64_t)534935602949197U, (uint64_t)251753159522567U,
-    (uint64_t)2846474078499321U, (uint64_t)4488649590348702U, (uint64_t)2437476916025038U,
-    (uint64_t)3040577412822874U, (uint64_t)79405234918614U, (uint64_t)3030621226551508U,
-    (uint64_t)2801117003929806U, (uint64_t)1642927515498422U, (uint64_t)2802725079726297U,
-    (uint64_t)8472780626107U, (uint64_t)866068070352655U, (uint64_t)188080768545106U,
-    (uint64_t)2152119998903058U, (uint64_t)3391239985029665U, (uint64_t)23820026013564U,
-    (uint64_t)2965064154891949U, (uint64_t)1846516097921398U, (uint64_t)4418379948133146U,
-    (uint64_t)3137755426942400U, (uint64_t)47705291301781U, (uint64_t)4278533051105665U,
-    (uint64_t)3453643211214931U, (uint64_t)3379734319145156U, (uint64_t)3762442192097039U,
-    (uint64_t)40243003528694U, (uint64_t)4063448994211201U, (uint64_t)5697015368785U,
-    (uint64_t)1006545411838613U, (uint64_t)4242291693755210U, (uint64_t)135184629190512U,
-    (uint64_t)264898689131035U, (uint64_t)611796474823597U, (uint64_t)3255382250029089U,
-    (uint64_t)3490429246984696U, (uint64_t)236558595864362U, (uint64_t)2055934691551704U,
-    (uint64_t)1487711670114502U, (uint64_t)1823930698221632U, (uint64_t)2130937287438472U,
-    (uint64_t)154610053389779U, (uint64_t)2746573287023216U, (uint64_t)2430987262221221U,
-    (uint64_t)1668741642878689U, (uint64_t)904982541243977U, (uint64_t)56087343124948U,
-    (uint64_t)393905062353536U, (uint64_t)412681877350188U, (uint64_t)3153602040979977U,
-    (uint64_t)4466820876224989U, (uint64_t)146579165617857U, (uint64_t)2628741216508991U,
-    (uint64_t)747994231529806U, (uint64_t)750506569317681U, (uint64_t)1887492790748779U,
-    (uint64_t)35259008682771U, (uint64_t)2085116434894208U, (uint64_t)543291398921711U,
-    (uint64_t)1144362007901552U, (uint64_t)679305136036846U, (uint64_t)141090902244489U,
-    (uint64_t)632480954474859U, (uint64_t)2384513102652591U, (uint64_t)2225529790159790U,
-    (uint64_t)692258664851625U, (uint64_t)198681843567699U, (uint64_t)2397092587228181U,
-    (uint64_t)145862822166614U, (uint64_t)196976540479452U, (uint64_t)3321831130141455U,
-    (uint64_t)69266673089832U, (uint64_t)4469644227342284U, (uint64_t)3899271145504796U,
-    (uint64_t)1261890974076660U, (uint64_t)525357673886694U, (uint64_t)182135997828583U,
-    (uint64_t)4292760618810332U, (uint64_t)3404186545541683U, (uint64_t)312297386688768U,
-    (uint64_t)204377466824608U, (uint64_t)230900767857952U, (uint64_t)3871485172339693U,
-    (uint64_t)779449329662955U, (uint64_t)978655822464694U, (uint64_t)2278252139594027U,
-    (uint64_t)104641527040382U, (uint64_t)3528840153625765U, (uint64_t)4484699080275273U,
-    (uint64_t)1463971951102316U, (uint64_t)4013910812844749U, (uint64_t)228915589433620U,
-    (uint64_t)1209641433482461U, (uint64_t)4043178788774759U, (uint64_t)3008668238856634U,
-    (uint64_t)1448425089071412U, (uint64_t)26269719725037U, (uint64_t)3330785027545223U,
-    (uint64_t)852657975349259U, (uint64_t)227245054466105U, (uint64_t)1534632353984777U,
-    (uint64_t)207715098574660U, (uint64_t)3209837527352280U, (uint64_t)4051688046309066U,
-    (uint64_t)3839009590725955U, (uint64_t)1321506437398842U, (uint64_t)68340219159928U,
-    (uint64_t)1806950276956275U, (uint64_t)3923908055275295U, (uint64_t)743963253393575U,
-    (uint64_t)42162407478783U, (uint64_t)261334584474610U, (uint64_t)3728224928885214U,
-    (uint64_t)4004701081842869U, (uint64_t)709043201644674U, (uint64_t)4267294249150171U,
-    (uint64_t)255540582975025U, (uint64_t)875490593722211U, (uint64_t)796393708218375U,
-    (uint64_t)14774425627956U, (uint64_t)1500040516752097U, (uint64_t)141076627721678U,
-    (uint64_t)2634539368480628U, (uint64_t)1106488853550103U, (uint64_t)2346231921151930U,
-    (uint64_t)897108283954283U, (uint64_t)64616679559843U, (uint64_t)400244949840943U,
-    (uint64_t)1731263826831733U, (uint64_t)1649996579904651U, (uint64_t)3643693449640761U,
-    (uint64_t)172543068638991U, (uint64_t)329537981097182U, (uint64_t)2029799860802869U,
-    (uint64_t)4377737515208862U, (uint64_t)29103311051334U, (uint64_t)265583594111499U,
-    (uint64_t)3798074876561255U, (uint64_t)184749333259352U, (uint64_t)3117395073661801U,
-    (uint64_t)3695784565008833U, (uint64_t)64282709896721U, (uint64_t)1618968913246422U,
-    (uint64_t)3185235128095257U, (uint64_t)3288745068118692U, (uint64_t)1963818603508782U,
-    (uint64_t)281054350739495U, (uint64_t)1658639050810346U, (uint64_t)3061097601679552U,
-    (uint64_t)3023781433263746U, (uint64_t)2770283391242475U, (uint64_t)144508864751908U,
-    (uint64_t)173576288079856U, (uint64_t)46114579547054U, (uint64_t)1679480127300211U,
-    (uint64_t)1683062051644007U, (uint64_t)117183826129323U, (uint64_t)1894068608117440U,
-    (uint64_t)3846899838975733U, (uint64_t)4289279019496192U, (uint64_t)176995887914031U,
-    (uint64_t)78074942938713U, (uint64_t)454207263265292U, (uint64_t)972683614054061U,
-    (uint64_t)808474205144361U, (uint64_t)942703935951735U, (uint64_t)134460241077887U,
-    (uint64_t)2104196179349630U, (uint64_t)501632371208418U, (uint64_t)1666838991431177U,
-    (uint64_t)445606193139838U, (uint64_t)73704603396096U, (uint64_t)3140284774064777U,
-    (uint64_t)1356066420820179U, (uint64_t)227054159419281U, (uint64_t)1847611229198687U,
-    (uint64_t)82327838827660U, (uint64_t)3704027573265803U, (uint64_t)1585260489220244U,
-    (uint64_t)4404647914931933U, (uint64_t)2424649827425515U, (uint64_t)206821944206116U,
-    (uint64_t)1508635776287972U, (uint64_t)1933584575629676U, (uint64_t)1903635423783032U,
-    (uint64_t)4193642165165650U, (uint64_t)234321074690644U, (uint64_t)210406774251925U,
-    (uint64_t)1965845668185599U, (uint64_t)3059839433804731U, (uint64_t)1933300510683631U,
-    (uint64_t)150696600689211U, (uint64_t)4069293682158567U, (uint64_t)4346344602660044U,
-    (uint64_t)312200249664561U, (uint64_t)2495020807621840U, (uint64_t)1912707714385U,
-    (uint64_t)299345978159762U, (uint64_t)1164752722686920U, (uint64_t)225322433710338U,
-    (uint64_t)3128747381283759U, (uint64_t)275659067815583U, (uint64_t)1489671057429039U,
-    (uint64_t)1567693343342676U, (uint64_t)921672046098071U, (uint64_t)3707418899384085U,
-    (uint64_t)54646424931593U, (uint64_t)4026733380127147U, (uint64_t)2933435393699231U,
-    (uint64_t)3356593659521967U, (uint64_t)3637750749325529U, (uint64_t)232939412379045U,
-    (uint64_t)2298399636043069U, (uint64_t)270361546063041U, (uint64_t)2523933572551420U,
-    (uint64_t)3456896091572950U, (uint64_t)185447004732850U, (uint64_t)429322937697821U,
-    (uint64_t)2579704215668222U, (uint64_t)695065378803349U, (uint64_t)3987916247731243U,
-    (uint64_t)255159546348233U, (uint64_t)3057777929921282U, (uint64_t)1608970699916312U,
-    (uint64_t)1902369623063807U, (uint64_t)1413619643652777U, (uint64_t)94983996321227U,
-    (uint64_t)2832873179548050U, (uint64_t)4335430233622555U, (uint64_t)1559023976028843U,
-    (uint64_t)3297181988648895U, (uint64_t)100072021232323U, (uint64_t)2124984034109675U,
-    (uint64_t)4501252835618918U, (uint64_t)2053336899483297U, (uint64_t)638807226463876U,
-    (uint64_t)278445213600634U, (uint64_t)2311236445660555U, (uint64_t)303317664040012U,
-    (uint64_t)2659353858089024U, (uint64_t)3598827423980130U, (uint64_t)176059343827873U,
-    (uint64_t)3891639526275437U, (uint64_t)252823982819463U, (uint64_t)3404823300622345U,
-    (uint64_t)2758370772497456U, (uint64_t)91397496598783U, (uint64_t)2248661144141892U,
-    (uint64_t)491087075271969U, (uint64_t)1786344894571315U, (uint64_t)452497694885923U,
-    (uint64_t)34039628873357U, (uint64_t)2116503165025197U, (uint64_t)4436733709429923U,
-    (uint64_t)3045800776819238U, (uint64_t)1385518906078375U, (uint64_t)110495603336764U,
-    (uint64_t)4051447296249587U, (uint64_t)1103557421498625U, (uint64_t)1840785058439622U,
-    (uint64_t)425322753992314U, (uint64_t)98330046771676U, (uint64_t)365407468686431U,
-    (uint64_t)2611246859977123U, (uint64_t)3050253933135339U, (uint64_t)1006482220896688U,
-    (uint64_t)166818196428389U, (uint64_t)3415236093104372U, (uint64_t)1762308883882288U,
-    (uint64_t)1327828123094558U, (uint64_t)3403946425556706U, (uint64_t)96503464455441U,
-    (uint64_t)3893015304031471U, (uint64_t)3740839477490397U, (uint64_t)2411470812852231U,
-    (uint64_t)940927462436211U, (uint64_t)163825285911099U, (uint64_t)1622441495640386U,
-    (uint64_t)850224095680266U, (uint64_t)76199085900939U, (uint64_t)1941852365144042U,
-    (uint64_t)140326673652807U, (uint64_t)3161611011249524U, (uint64_t)317297150009965U,
-    (uint64_t)2145053259340619U, (uint64_t)2180498176457552U, (uint64_t)38457740506224U,
-    (uint64_t)394174899129468U, (uint64_t)2687474560485245U, (uint64_t)1542175980184516U,
-    (uint64_t)1628502671124819U, (uint64_t)48477401124385U, (uint64_t)4474181600025082U,
-    (uint64_t)2142747956365708U, (uint64_t)1638299432475478U, (uint64_t)2005869320353249U,
-    (uint64_t)112292630760956U, (uint64_t)1887521965171588U, (uint64_t)457587531429696U,
-    (uint64_t)840994209504042U, (uint64_t)4268060856325798U, (uint64_t)195597993440388U,
-    (uint64_t)4148484749020338U, (uint64_t)2074885000909672U, (uint64_t)2309839019263165U,
-    (uint64_t)2087616209681024U, (uint64_t)257214370719966U, (uint64_t)2331363508376581U,
-    (uint64_t)1233124357504711U, (uint64_t)2849542202650296U, (uint64_t)3790982825325736U,
-    (uint64_t)13381453503890U, (uint64_t)1665246594531069U, (uint64_t)4165624287443904U,
-    (uint64_t)3418759698027493U, (uint64_t)2118493255117399U, (uint64_t)136249206366067U,
-    (uint64_t)4064050233283309U, (uint64_t)1368779887911300U, (uint64_t)4370550759530269U,
-    (uint64_t)66992990631341U, (uint64_t)84442368922270U, (uint64_t)2139322635321394U,
-    (uint64_t)2076163483726795U, (uint64_t)657097866349103U, (uint64_t)2095579409488071U,
-    (uint64_t)226525774791341U, (uint64_t)4445744257665359U, (uint64_t)2035752839278107U,
-    (uint64_t)1998242662838304U, (uint64_t)1601548415521694U, (uint64_t)151297684296198U,
-    (uint64_t)1350963039017303U, (uint64_t)2624916349548281U, (uint64_t)2018863259670197U,
-    (uint64_t)2717274357461290U, (uint64_t)94024796961533U, (uint64_t)711335520409111U,
-    (uint64_t)4322093765820263U, (uint64_t)2041650358174649U, (uint64_t)3439791603157577U,
-    (uint64_t)179292018616267U, (uint64_t)2436436921286669U, (uint64_t)3905268797208340U,
-    (uint64_t)2829194895162985U, (uint64_t)1355175382191543U, (uint64_t)55128779761539U,
-    (uint64_t)2648428998786922U, (uint64_t)869805912573515U, (uint64_t)3706708942847864U,
-    (uint64_t)2785288916584667U, (uint64_t)37156862850147U, (uint64_t)1422245336293228U,
-    (uint64_t)4497066058933021U, (uint64_t)85588912978349U, (uint64_t)2616252221194611U,
-    (uint64_t)53506393720989U, (uint64_t)3727539190732644U, (uint64_t)872132446545237U,
-    (uint64_t)933583590986077U, (uint64_t)3794591170581203U, (uint64_t)167875550514069U,
-    (uint64_t)2267466834993297U, (uint64_t)3072652681756816U, (uint64_t)2108499037430803U,
-    (uint64_t)1606735192928366U, (uint64_t)72339568815255U, (uint64_t)3258484260684219U,
-    (uint64_t)3277927277719855U, (uint64_t)2459560373011535U, (uint64_t)1672794293294033U,
-    (uint64_t)227460934880669U, (uint64_t)3702454405413705U, (uint64_t)106168148441676U,
-    (uint64_t)1356617643071159U, (uint64_t)3280896569942762U, (uint64_t)142618711614302U,
-    (uint64_t)4291782740862057U, (uint64_t)4141020884874235U, (uint64_t)3720787221267125U,
-    (uint64_t)552884940089351U, (uint64_t)174626154407180U, (uint64_t)972071013326540U,
-    (uint64_t)4458530419931903U, (uint64_t)4435168973822858U, (uint64_t)1902967548748411U,
-    (uint64_t)53007977605840U, (uint64_t)2453997334323925U, (uint64_t)3653077937283262U,
-    (uint64_t)850660265046356U, (uint64_t)312721924805450U, (uint64_t)268503679240683U,
-    (uint64_t)256960167714122U, (uint64_t)1474492507858350U, (uint64_t)2456345526438488U,
-    (uint64_t)3686029507160255U, (uint64_t)279158933010398U, (uint64_t)3646946293948063U,
-    (uint64_t)704477527214036U, (uint64_t)3387744169891031U, (uint64_t)3772622670980241U,
-    (uint64_t)136368897543304U, (uint64_t)3744894052577607U, (uint64_t)1976007214443430U,
-    (uint64_t)2090045379763451U, (uint64_t)968565474458988U, (uint64_t)234295114806066U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    705178180786072ULL, 3855836460717471ULL, 4089131105950716ULL, 3301581525494108ULL,
+    133858670344668ULL, 2199641648059576ULL, 1278080618437060ULL, 3959378566518708ULL,
+    3455034269351872ULL, 79417610544803ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1282049064345544ULL,
+    971732600440099ULL, 1014594595727339ULL, 4392159187541980ULL, 268327875692285ULL,
+    2411661712280539ULL, 1092576199280126ULL, 4328619610718051ULL, 3535440816471627ULL,
+    95182251488556ULL, 1893725512243753ULL, 3619861457111820ULL, 879374960417905ULL,
+    2868056058129113ULL, 273195291893682ULL, 2044797305960112ULL, 2357106853933780ULL,
+    3563112438336058ULL, 2430811541762558ULL, 106443809495428ULL, 2231357633909668ULL,
+    3641705835951936ULL, 80642569314189ULL, 2254841882373268ULL, 149848031966573ULL,
+    2304615661367764ULL, 2410957403736446ULL, 2712754805859804ULL, 2440183877540536ULL,
+    99784623895865ULL, 3667773127482758ULL, 1354899394473308ULL, 3636602998800808ULL,
+    2709296679846364ULL, 7253362091963ULL, 3585950735562744ULL, 935775991758415ULL,
+    4108078106735201ULL, 556081800336307ULL, 229585977163057ULL, 4055594186679801ULL,
+    1767681004944933ULL, 1432634922083242ULL, 534935602949197ULL, 251753159522567ULL,
+    2846474078499321ULL, 4488649590348702ULL, 2437476916025038ULL, 3040577412822874ULL,
+    79405234918614ULL, 3030621226551508ULL, 2801117003929806ULL, 1642927515498422ULL,
+    2802725079726297ULL, 8472780626107ULL, 866068070352655ULL, 188080768545106ULL,
+    2152119998903058ULL, 3391239985029665ULL, 23820026013564ULL, 2965064154891949ULL,
+    1846516097921398ULL, 4418379948133146ULL, 3137755426942400ULL, 47705291301781ULL,
+    4278533051105665ULL, 3453643211214931ULL, 3379734319145156ULL, 3762442192097039ULL,
+    40243003528694ULL, 4063448994211201ULL, 5697015368785ULL, 1006545411838613ULL,
+    4242291693755210ULL, 135184629190512ULL, 264898689131035ULL, 611796474823597ULL,
+    3255382250029089ULL, 3490429246984696ULL, 236558595864362ULL, 2055934691551704ULL,
+    1487711670114502ULL, 1823930698221632ULL, 2130937287438472ULL, 154610053389779ULL,
+    2746573287023216ULL, 2430987262221221ULL, 1668741642878689ULL, 904982541243977ULL,
+    56087343124948ULL, 393905062353536ULL, 412681877350188ULL, 3153602040979977ULL,
+    4466820876224989ULL, 146579165617857ULL, 2628741216508991ULL, 747994231529806ULL,
+    750506569317681ULL, 1887492790748779ULL, 35259008682771ULL, 2085116434894208ULL,
+    543291398921711ULL, 1144362007901552ULL, 679305136036846ULL, 141090902244489ULL,
+    632480954474859ULL, 2384513102652591ULL, 2225529790159790ULL, 692258664851625ULL,
+    198681843567699ULL, 2397092587228181ULL, 145862822166614ULL, 196976540479452ULL,
+    3321831130141455ULL, 69266673089832ULL, 4469644227342284ULL, 3899271145504796ULL,
+    1261890974076660ULL, 525357673886694ULL, 182135997828583ULL, 4292760618810332ULL,
+    3404186545541683ULL, 312297386688768ULL, 204377466824608ULL, 230900767857952ULL,
+    3871485172339693ULL, 779449329662955ULL, 978655822464694ULL, 2278252139594027ULL,
+    104641527040382ULL, 3528840153625765ULL, 4484699080275273ULL, 1463971951102316ULL,
+    4013910812844749ULL, 228915589433620ULL, 1209641433482461ULL, 4043178788774759ULL,
+    3008668238856634ULL, 1448425089071412ULL, 26269719725037ULL, 3330785027545223ULL,
+    852657975349259ULL, 227245054466105ULL, 1534632353984777ULL, 207715098574660ULL,
+    3209837527352280ULL, 4051688046309066ULL, 3839009590725955ULL, 1321506437398842ULL,
+    68340219159928ULL, 1806950276956275ULL, 3923908055275295ULL, 743963253393575ULL,
+    42162407478783ULL, 261334584474610ULL, 3728224928885214ULL, 4004701081842869ULL,
+    709043201644674ULL, 4267294249150171ULL, 255540582975025ULL, 875490593722211ULL,
+    796393708218375ULL, 14774425627956ULL, 1500040516752097ULL, 141076627721678ULL,
+    2634539368480628ULL, 1106488853550103ULL, 2346231921151930ULL, 897108283954283ULL,
+    64616679559843ULL, 400244949840943ULL, 1731263826831733ULL, 1649996579904651ULL,
+    3643693449640761ULL, 172543068638991ULL, 329537981097182ULL, 2029799860802869ULL,
+    4377737515208862ULL, 29103311051334ULL, 265583594111499ULL, 3798074876561255ULL,
+    184749333259352ULL, 3117395073661801ULL, 3695784565008833ULL, 64282709896721ULL,
+    1618968913246422ULL, 3185235128095257ULL, 3288745068118692ULL, 1963818603508782ULL,
+    281054350739495ULL, 1658639050810346ULL, 3061097601679552ULL, 3023781433263746ULL,
+    2770283391242475ULL, 144508864751908ULL, 173576288079856ULL, 46114579547054ULL,
+    1679480127300211ULL, 1683062051644007ULL, 117183826129323ULL, 1894068608117440ULL,
+    3846899838975733ULL, 4289279019496192ULL, 176995887914031ULL, 78074942938713ULL,
+    454207263265292ULL, 972683614054061ULL, 808474205144361ULL, 942703935951735ULL,
+    134460241077887ULL, 2104196179349630ULL, 501632371208418ULL, 1666838991431177ULL,
+    445606193139838ULL, 73704603396096ULL, 3140284774064777ULL, 1356066420820179ULL,
+    227054159419281ULL, 1847611229198687ULL, 82327838827660ULL, 3704027573265803ULL,
+    1585260489220244ULL, 4404647914931933ULL, 2424649827425515ULL, 206821944206116ULL,
+    1508635776287972ULL, 1933584575629676ULL, 1903635423783032ULL, 4193642165165650ULL,
+    234321074690644ULL, 210406774251925ULL, 1965845668185599ULL, 3059839433804731ULL,
+    1933300510683631ULL, 150696600689211ULL, 4069293682158567ULL, 4346344602660044ULL,
+    312200249664561ULL, 2495020807621840ULL, 1912707714385ULL, 299345978159762ULL,
+    1164752722686920ULL, 225322433710338ULL, 3128747381283759ULL, 275659067815583ULL,
+    1489671057429039ULL, 1567693343342676ULL, 921672046098071ULL, 3707418899384085ULL,
+    54646424931593ULL, 4026733380127147ULL, 2933435393699231ULL, 3356593659521967ULL,
+    3637750749325529ULL, 232939412379045ULL, 2298399636043069ULL, 270361546063041ULL,
+    2523933572551420ULL, 3456896091572950ULL, 185447004732850ULL, 429322937697821ULL,
+    2579704215668222ULL, 695065378803349ULL, 3987916247731243ULL, 255159546348233ULL,
+    3057777929921282ULL, 1608970699916312ULL, 1902369623063807ULL, 1413619643652777ULL,
+    94983996321227ULL, 2832873179548050ULL, 4335430233622555ULL, 1559023976028843ULL,
+    3297181988648895ULL, 100072021232323ULL, 2124984034109675ULL, 4501252835618918ULL,
+    2053336899483297ULL, 638807226463876ULL, 278445213600634ULL, 2311236445660555ULL,
+    303317664040012ULL, 2659353858089024ULL, 3598827423980130ULL, 176059343827873ULL,
+    3891639526275437ULL, 252823982819463ULL, 3404823300622345ULL, 2758370772497456ULL,
+    91397496598783ULL, 2248661144141892ULL, 491087075271969ULL, 1786344894571315ULL,
+    452497694885923ULL, 34039628873357ULL, 2116503165025197ULL, 4436733709429923ULL,
+    3045800776819238ULL, 1385518906078375ULL, 110495603336764ULL, 4051447296249587ULL,
+    1103557421498625ULL, 1840785058439622ULL, 425322753992314ULL, 98330046771676ULL,
+    365407468686431ULL, 2611246859977123ULL, 3050253933135339ULL, 1006482220896688ULL,
+    166818196428389ULL, 3415236093104372ULL, 1762308883882288ULL, 1327828123094558ULL,
+    3403946425556706ULL, 96503464455441ULL, 3893015304031471ULL, 3740839477490397ULL,
+    2411470812852231ULL, 940927462436211ULL, 163825285911099ULL, 1622441495640386ULL,
+    850224095680266ULL, 76199085900939ULL, 1941852365144042ULL, 140326673652807ULL,
+    3161611011249524ULL, 317297150009965ULL, 2145053259340619ULL, 2180498176457552ULL,
+    38457740506224ULL, 394174899129468ULL, 2687474560485245ULL, 1542175980184516ULL,
+    1628502671124819ULL, 48477401124385ULL, 4474181600025082ULL, 2142747956365708ULL,
+    1638299432475478ULL, 2005869320353249ULL, 112292630760956ULL, 1887521965171588ULL,
+    457587531429696ULL, 840994209504042ULL, 4268060856325798ULL, 195597993440388ULL,
+    4148484749020338ULL, 2074885000909672ULL, 2309839019263165ULL, 2087616209681024ULL,
+    257214370719966ULL, 2331363508376581ULL, 1233124357504711ULL, 2849542202650296ULL,
+    3790982825325736ULL, 13381453503890ULL, 1665246594531069ULL, 4165624287443904ULL,
+    3418759698027493ULL, 2118493255117399ULL, 136249206366067ULL, 4064050233283309ULL,
+    1368779887911300ULL, 4370550759530269ULL, 66992990631341ULL, 84442368922270ULL,
+    2139322635321394ULL, 2076163483726795ULL, 657097866349103ULL, 2095579409488071ULL,
+    226525774791341ULL, 4445744257665359ULL, 2035752839278107ULL, 1998242662838304ULL,
+    1601548415521694ULL, 151297684296198ULL, 1350963039017303ULL, 2624916349548281ULL,
+    2018863259670197ULL, 2717274357461290ULL, 94024796961533ULL, 711335520409111ULL,
+    4322093765820263ULL, 2041650358174649ULL, 3439791603157577ULL, 179292018616267ULL,
+    2436436921286669ULL, 3905268797208340ULL, 2829194895162985ULL, 1355175382191543ULL,
+    55128779761539ULL, 2648428998786922ULL, 869805912573515ULL, 3706708942847864ULL,
+    2785288916584667ULL, 37156862850147ULL, 1422245336293228ULL, 4497066058933021ULL,
+    85588912978349ULL, 2616252221194611ULL, 53506393720989ULL, 3727539190732644ULL,
+    872132446545237ULL, 933583590986077ULL, 3794591170581203ULL, 167875550514069ULL,
+    2267466834993297ULL, 3072652681756816ULL, 2108499037430803ULL, 1606735192928366ULL,
+    72339568815255ULL, 3258484260684219ULL, 3277927277719855ULL, 2459560373011535ULL,
+    1672794293294033ULL, 227460934880669ULL, 3702454405413705ULL, 106168148441676ULL,
+    1356617643071159ULL, 3280896569942762ULL, 142618711614302ULL, 4291782740862057ULL,
+    4141020884874235ULL, 3720787221267125ULL, 552884940089351ULL, 174626154407180ULL,
+    972071013326540ULL, 4458530419931903ULL, 4435168973822858ULL, 1902967548748411ULL,
+    53007977605840ULL, 2453997334323925ULL, 3653077937283262ULL, 850660265046356ULL,
+    312721924805450ULL, 268503679240683ULL, 256960167714122ULL, 1474492507858350ULL,
+    2456345526438488ULL, 3686029507160255ULL, 279158933010398ULL, 3646946293948063ULL,
+    704477527214036ULL, 3387744169891031ULL, 3772622670980241ULL, 136368897543304ULL,
+    3744894052577607ULL, 1976007214443430ULL, 2090045379763451ULL, 968565474458988ULL,
+    234295114806066ULL
   };
 
 #if defined(__cplusplus)
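The table hunks above and below are content-preserving: every limb keeps its value, and only the literal spelling changes from an explicit (uint64_t) cast to a ULL suffix. A minimal stand-alone check (illustrative only, not part of the patch) of why the two spellings are interchangeable, using one value taken from the basepoint table above:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
      /* The U-suffixed literal is already wide enough to hold the value
         (unsigned long or unsigned long long, depending on the data model),
         so the cast only converts an already-exact value; ULL names a
         64-bit-or-wider type directly.  Both yield the same uint64_t. */
      uint64_t cast_form = (uint64_t)705178180786072U;
      uint64_t ull_form  = 705178180786072ULL;
      assert(cast_form == ull_form);
      return 0;
    }
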
diff --git a/include/internal/Hacl_Poly1305_128.h b/include/internal/Hacl_MAC_Poly1305.h
similarity index 77%
rename from include/internal/Hacl_Poly1305_128.h
rename to include/internal/Hacl_MAC_Poly1305.h
index b9964714..29e1734a 100644
--- a/include/internal/Hacl_Poly1305_128.h
+++ b/include/internal/Hacl_MAC_Poly1305.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __internal_Hacl_Poly1305_128_H
-#define __internal_Hacl_Poly1305_128_H
+#ifndef __internal_Hacl_MAC_Poly1305_H
+#define __internal_Hacl_MAC_Poly1305_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,21 +35,15 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "../Hacl_Poly1305_128.h"
-#include "libintvector.h"
+#include "../Hacl_MAC_Poly1305.h"
 
-void
-Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc, uint8_t *b);
+void Hacl_MAC_Poly1305_poly1305_init(uint64_t *ctx, uint8_t *key);
 
-void
-Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
-  Lib_IntVector_Intrinsics_vec128 *out,
-  Lib_IntVector_Intrinsics_vec128 *p
-);
+void Hacl_MAC_Poly1305_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx);
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __internal_Hacl_Poly1305_128_H_DEFINED
+#define __internal_Hacl_MAC_Poly1305_H_DEFINED
 #endif
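The renamed internal header above now exposes only the scalar init/finish pair. A minimal sketch of how that pair composes into a MAC computation, assuming the 25-limb uint64_t context and the 32-byte key / 16-byte tag layout used by the existing scalar Poly1305 code (these sizes, and the helper name, are assumptions for illustration, not declarations from this patch):

    #include <stdint.h>
    #include <string.h>
    #include "internal/Hacl_MAC_Poly1305.h"

    /* Illustrative only: tag of the empty message via the internal init/finish
       pair declared above.  Message blocks would normally be absorbed between
       the two calls by the (non-internal) update code. */
    static void poly1305_tag_of_empty(uint8_t tag[16], const uint8_t key[32])
    {
      uint64_t ctx[25] = { 0U };  /* assumed context size: 5-limb accumulator plus r powers */
      uint8_t k[32];
      memcpy(k, key, 32U);        /* the declared prototypes take a non-const key pointer */
      Hacl_MAC_Poly1305_poly1305_init(ctx, k);        /* loads r/s, zeroes the accumulator */
      Hacl_MAC_Poly1305_poly1305_finish(tag, k, ctx); /* adds s, writes the 16-byte tag */
    }
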
diff --git a/include/msvc/internal/Hacl_Poly1305_128.h b/include/internal/Hacl_MAC_Poly1305_Simd128.h
similarity index 73%
rename from include/msvc/internal/Hacl_Poly1305_128.h
rename to include/internal/Hacl_MAC_Poly1305_Simd128.h
index b9964714..fe120e43 100644
--- a/include/msvc/internal/Hacl_Poly1305_128.h
+++ b/include/internal/Hacl_MAC_Poly1305_Simd128.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __internal_Hacl_Poly1305_128_H
-#define __internal_Hacl_Poly1305_128_H
+#ifndef __internal_Hacl_MAC_Poly1305_Simd128_H
+#define __internal_Hacl_MAC_Poly1305_Simd128_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,21 +35,30 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "../Hacl_Poly1305_128.h"
+#include "../Hacl_MAC_Poly1305_Simd128.h"
 #include "libintvector.h"
 
-void
-Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc, uint8_t *b);
+void Hacl_MAC_Poly1305_Simd128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc, uint8_t *b);
 
 void
-Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
+Hacl_MAC_Poly1305_Simd128_fmul_r2_normalize(
   Lib_IntVector_Intrinsics_vec128 *out,
   Lib_IntVector_Intrinsics_vec128 *p
 );
 
+void
+Hacl_MAC_Poly1305_Simd128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key);
+
+void
+Hacl_MAC_Poly1305_Simd128_poly1305_finish(
+  uint8_t *tag,
+  uint8_t *key,
+  Lib_IntVector_Intrinsics_vec128 *ctx
+);
+
 #if defined(__cplusplus)
 }
 #endif
 
-#define __internal_Hacl_Poly1305_128_H_DEFINED
+#define __internal_Hacl_MAC_Poly1305_Simd128_H_DEFINED
 #endif
diff --git a/include/msvc/internal/Hacl_Poly1305_256.h b/include/internal/Hacl_MAC_Poly1305_Simd256.h
similarity index 73%
rename from include/msvc/internal/Hacl_Poly1305_256.h
rename to include/internal/Hacl_MAC_Poly1305_Simd256.h
index 21d78b16..7bf106c1 100644
--- a/include/msvc/internal/Hacl_Poly1305_256.h
+++ b/include/internal/Hacl_MAC_Poly1305_Simd256.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __internal_Hacl_Poly1305_256_H
-#define __internal_Hacl_Poly1305_256_H
+#ifndef __internal_Hacl_MAC_Poly1305_Simd256_H
+#define __internal_Hacl_MAC_Poly1305_Simd256_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,21 +35,30 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "../Hacl_Poly1305_256.h"
+#include "../Hacl_MAC_Poly1305_Simd256.h"
 #include "libintvector.h"
 
-void
-Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc, uint8_t *b);
+void Hacl_MAC_Poly1305_Simd256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc, uint8_t *b);
 
 void
-Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
+Hacl_MAC_Poly1305_Simd256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 *out,
   Lib_IntVector_Intrinsics_vec256 *p
 );
 
+void
+Hacl_MAC_Poly1305_Simd256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *key);
+
+void
+Hacl_MAC_Poly1305_Simd256_poly1305_finish(
+  uint8_t *tag,
+  uint8_t *key,
+  Lib_IntVector_Intrinsics_vec256 *ctx
+);
+
 #if defined(__cplusplus)
 }
 #endif
 
-#define __internal_Hacl_Poly1305_256_H_DEFINED
+#define __internal_Hacl_MAC_Poly1305_Simd256_H_DEFINED
 #endif
diff --git a/include/internal/Hacl_P256_PrecompTable.h b/include/internal/Hacl_P256_PrecompTable.h
index f185c2be..c852ef8c 100644
--- a/include/internal/Hacl_P256_PrecompTable.h
+++ b/include/internal/Hacl_P256_PrecompTable.h
@@ -39,476 +39,360 @@ static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_basepoint_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)8784043285714375740U,
-    (uint64_t)8483257759279461889U, (uint64_t)8789745728267363600U, (uint64_t)1770019616739251654U,
-    (uint64_t)15992936863339206154U, (uint64_t)10037038012062884956U,
-    (uint64_t)15197544864945402661U, (uint64_t)9615747158586711429U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)10634854829044225757U, (uint64_t)351552716085025155U, (uint64_t)10645315080955407736U,
-    (uint64_t)3609262091244858135U, (uint64_t)15760741698986874125U,
-    (uint64_t)14936374388219697827U, (uint64_t)15751360096993017895U,
-    (uint64_t)18012233706239762398U, (uint64_t)1993877568177495041U,
-    (uint64_t)10345888787846536528U, (uint64_t)7746511691117935375U,
-    (uint64_t)14517043990409914413U, (uint64_t)14122549297570634151U,
-    (uint64_t)16934610359517083771U, (uint64_t)5724511325497097418U, (uint64_t)8983432969107448705U,
-    (uint64_t)2687429970334080245U, (uint64_t)16525396802810050288U, (uint64_t)7602596488871585854U,
-    (uint64_t)4813919589149203084U, (uint64_t)7680395813780804519U, (uint64_t)6687709583048023590U,
-    (uint64_t)18086445169104142027U, (uint64_t)9637814708330203929U,
-    (uint64_t)14785108459960679090U, (uint64_t)3838023279095023581U, (uint64_t)3555615526157830307U,
-    (uint64_t)5177066488380472871U, (uint64_t)18218186719108038403U,
-    (uint64_t)16281556341699656105U, (uint64_t)1524227924561461191U, (uint64_t)4148060517641909597U,
-    (uint64_t)2858290374115363433U, (uint64_t)8942772026334130620U, (uint64_t)3034451298319885113U,
-    (uint64_t)8447866036736640940U, (uint64_t)11204933433076256578U,
-    (uint64_t)18333595740249588297U, (uint64_t)8259597024804538246U, (uint64_t)9539734295777539786U,
-    (uint64_t)9797290423046626413U, (uint64_t)5777303437849646537U, (uint64_t)8739356909899132020U,
-    (uint64_t)14815960973766782158U, (uint64_t)15286581798204509801U,
-    (uint64_t)17597362577777019682U, (uint64_t)13259283710820519742U,
-    (uint64_t)10501322996899164670U, (uint64_t)1221138904338319642U,
-    (uint64_t)14586685489551951885U, (uint64_t)895326705426031212U, (uint64_t)14398171728560617847U,
-    (uint64_t)9592550823745097391U, (uint64_t)17240998489162206026U, (uint64_t)8085479283308189196U,
-    (uint64_t)14844657737893882826U, (uint64_t)15923425394150618234U,
-    (uint64_t)2997808084773249525U, (uint64_t)494323555453660587U, (uint64_t)1215695327517794764U,
-    (uint64_t)9476207381098391690U, (uint64_t)7480789678419122995U, (uint64_t)15212230329321082489U,
-    (uint64_t)436189395349576388U, (uint64_t)17377474396456660834U, (uint64_t)15237013929655017939U,
-    (uint64_t)11444428846883781676U, (uint64_t)5112749694521428575U, (uint64_t)950829367509872073U,
-    (uint64_t)17665036182057559519U, (uint64_t)17205133339690002313U,
-    (uint64_t)16233765170251334549U, (uint64_t)10122775683257972591U,
-    (uint64_t)3352514236455632420U, (uint64_t)9143148522359954691U, (uint64_t)601191684005658860U,
-    (uint64_t)13398772186646349998U, (uint64_t)15512696600132928431U,
-    (uint64_t)9128416073728948653U, (uint64_t)11233051033546138578U, (uint64_t)6769345682610122833U,
-    (uint64_t)10823233224575054288U, (uint64_t)9997725227559980175U, (uint64_t)6733425642852897415U,
-    (uint64_t)16302206918151466066U, (uint64_t)1669330822143265921U, (uint64_t)2661645605036546002U,
-    (uint64_t)17182558479745802165U, (uint64_t)1165082692376932040U, (uint64_t)9470595929011488359U,
-    (uint64_t)6142147329285324932U, (uint64_t)4829075085998111287U, (uint64_t)10231370681107338930U,
-    (uint64_t)9591876895322495239U, (uint64_t)10316468561384076618U,
-    (uint64_t)11592503647238064235U, (uint64_t)13395813606055179632U, (uint64_t)511127033980815508U,
-    (uint64_t)12434976573147649880U, (uint64_t)3425094795384359127U, (uint64_t)6816971736303023445U,
-    (uint64_t)15444670609021139344U, (uint64_t)9464349818322082360U,
-    (uint64_t)16178216413042376883U, (uint64_t)9595540370774317348U, (uint64_t)7229365182662875710U,
-    (uint64_t)4601177649460012843U, (uint64_t)5455046447382487090U, (uint64_t)10854066421606187521U,
-    (uint64_t)15913416821879788071U, (uint64_t)2297365362023460173U, (uint64_t)2603252216454941350U,
-    (uint64_t)6768791943870490934U, (uint64_t)15705936687122754810U, (uint64_t)9537096567546600694U,
-    (uint64_t)17580538144855035062U, (uint64_t)4496542856965746638U, (uint64_t)8444341625922124942U,
-    (uint64_t)12191263903636183168U, (uint64_t)17427332907535974165U,
-    (uint64_t)14307569739254103736U, (uint64_t)13900598742063266169U,
-    (uint64_t)7176996424355977650U, (uint64_t)5709008170379717479U, (uint64_t)14471312052264549092U,
-    (uint64_t)1464519909491759867U, (uint64_t)3328154641049602121U, (uint64_t)13020349337171136774U,
-    (uint64_t)2772166279972051938U, (uint64_t)10854476939425975292U, (uint64_t)1967189930534630940U,
-    (uint64_t)2802919076529341959U, (uint64_t)14792226094833519208U,
-    (uint64_t)14675640928566522177U, (uint64_t)14838974364643800837U,
-    (uint64_t)17631460696099549980U, (uint64_t)17434186275364935469U,
-    (uint64_t)2665648200587705473U, (uint64_t)13202122464492564051U, (uint64_t)7576287350918073341U,
-    (uint64_t)2272206013910186424U, (uint64_t)14558761641743937843U, (uint64_t)5675729149929979729U,
-    (uint64_t)9043135187561613166U, (uint64_t)11750149293830589225U, (uint64_t)740555197954307911U,
-    (uint64_t)9871738005087190699U, (uint64_t)17178667634283502053U,
-    (uint64_t)18046255991533013265U, (uint64_t)4458222096988430430U, (uint64_t)8452427758526311627U,
-    (uint64_t)13825286929656615266U, (uint64_t)13956286357198391218U,
-    (uint64_t)15875692916799995079U, (uint64_t)10634895319157013920U,
-    (uint64_t)13230116118036304207U, (uint64_t)8795317393614625606U, (uint64_t)7001710806858862020U,
-    (uint64_t)7949746088586183478U, (uint64_t)14677556044923602317U,
-    (uint64_t)11184023437485843904U, (uint64_t)11215864722023085094U,
-    (uint64_t)6444464081471519014U, (uint64_t)1706241174022415217U, (uint64_t)8243975633057550613U,
-    (uint64_t)15502902453836085864U, (uint64_t)3799182188594003953U, (uint64_t)3538840175098724094U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 8784043285714375740ULL, 8483257759279461889ULL, 8789745728267363600ULL,
+    1770019616739251654ULL, 15992936863339206154ULL, 10037038012062884956ULL,
+    15197544864945402661ULL, 9615747158586711429ULL, 1ULL, 18446744069414584320ULL,
+    18446744073709551615ULL, 4294967294ULL, 10634854829044225757ULL, 351552716085025155ULL,
+    10645315080955407736ULL, 3609262091244858135ULL, 15760741698986874125ULL,
+    14936374388219697827ULL, 15751360096993017895ULL, 18012233706239762398ULL,
+    1993877568177495041ULL, 10345888787846536528ULL, 7746511691117935375ULL,
+    14517043990409914413ULL, 14122549297570634151ULL, 16934610359517083771ULL,
+    5724511325497097418ULL, 8983432969107448705ULL, 2687429970334080245ULL, 16525396802810050288ULL,
+    7602596488871585854ULL, 4813919589149203084ULL, 7680395813780804519ULL, 6687709583048023590ULL,
+    18086445169104142027ULL, 9637814708330203929ULL, 14785108459960679090ULL,
+    3838023279095023581ULL, 3555615526157830307ULL, 5177066488380472871ULL, 18218186719108038403ULL,
+    16281556341699656105ULL, 1524227924561461191ULL, 4148060517641909597ULL, 2858290374115363433ULL,
+    8942772026334130620ULL, 3034451298319885113ULL, 8447866036736640940ULL, 11204933433076256578ULL,
+    18333595740249588297ULL, 8259597024804538246ULL, 9539734295777539786ULL, 9797290423046626413ULL,
+    5777303437849646537ULL, 8739356909899132020ULL, 14815960973766782158ULL,
+    15286581798204509801ULL, 17597362577777019682ULL, 13259283710820519742ULL,
+    10501322996899164670ULL, 1221138904338319642ULL, 14586685489551951885ULL, 895326705426031212ULL,
+    14398171728560617847ULL, 9592550823745097391ULL, 17240998489162206026ULL,
+    8085479283308189196ULL, 14844657737893882826ULL, 15923425394150618234ULL,
+    2997808084773249525ULL, 494323555453660587ULL, 1215695327517794764ULL, 9476207381098391690ULL,
+    7480789678419122995ULL, 15212230329321082489ULL, 436189395349576388ULL, 17377474396456660834ULL,
+    15237013929655017939ULL, 11444428846883781676ULL, 5112749694521428575ULL, 950829367509872073ULL,
+    17665036182057559519ULL, 17205133339690002313ULL, 16233765170251334549ULL,
+    10122775683257972591ULL, 3352514236455632420ULL, 9143148522359954691ULL, 601191684005658860ULL,
+    13398772186646349998ULL, 15512696600132928431ULL, 9128416073728948653ULL,
+    11233051033546138578ULL, 6769345682610122833ULL, 10823233224575054288ULL,
+    9997725227559980175ULL, 6733425642852897415ULL, 16302206918151466066ULL, 1669330822143265921ULL,
+    2661645605036546002ULL, 17182558479745802165ULL, 1165082692376932040ULL, 9470595929011488359ULL,
+    6142147329285324932ULL, 4829075085998111287ULL, 10231370681107338930ULL, 9591876895322495239ULL,
+    10316468561384076618ULL, 11592503647238064235ULL, 13395813606055179632ULL,
+    511127033980815508ULL, 12434976573147649880ULL, 3425094795384359127ULL, 6816971736303023445ULL,
+    15444670609021139344ULL, 9464349818322082360ULL, 16178216413042376883ULL,
+    9595540370774317348ULL, 7229365182662875710ULL, 4601177649460012843ULL, 5455046447382487090ULL,
+    10854066421606187521ULL, 15913416821879788071ULL, 2297365362023460173ULL,
+    2603252216454941350ULL, 6768791943870490934ULL, 15705936687122754810ULL, 9537096567546600694ULL,
+    17580538144855035062ULL, 4496542856965746638ULL, 8444341625922124942ULL,
+    12191263903636183168ULL, 17427332907535974165ULL, 14307569739254103736ULL,
+    13900598742063266169ULL, 7176996424355977650ULL, 5709008170379717479ULL,
+    14471312052264549092ULL, 1464519909491759867ULL, 3328154641049602121ULL,
+    13020349337171136774ULL, 2772166279972051938ULL, 10854476939425975292ULL,
+    1967189930534630940ULL, 2802919076529341959ULL, 14792226094833519208ULL,
+    14675640928566522177ULL, 14838974364643800837ULL, 17631460696099549980ULL,
+    17434186275364935469ULL, 2665648200587705473ULL, 13202122464492564051ULL,
+    7576287350918073341ULL, 2272206013910186424ULL, 14558761641743937843ULL, 5675729149929979729ULL,
+    9043135187561613166ULL, 11750149293830589225ULL, 740555197954307911ULL, 9871738005087190699ULL,
+    17178667634283502053ULL, 18046255991533013265ULL, 4458222096988430430ULL,
+    8452427758526311627ULL, 13825286929656615266ULL, 13956286357198391218ULL,
+    15875692916799995079ULL, 10634895319157013920ULL, 13230116118036304207ULL,
+    8795317393614625606ULL, 7001710806858862020ULL, 7949746088586183478ULL, 14677556044923602317ULL,
+    11184023437485843904ULL, 11215864722023085094ULL, 6444464081471519014ULL,
+    1706241174022415217ULL, 8243975633057550613ULL, 15502902453836085864ULL, 3799182188594003953ULL,
+    3538840175098724094ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_g_pow2_64_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1499621593102562565U,
-    (uint64_t)16692369783039433128U, (uint64_t)15337520135922861848U,
-    (uint64_t)5455737214495366228U, (uint64_t)17827017231032529600U,
-    (uint64_t)12413621606240782649U, (uint64_t)2290483008028286132U,
-    (uint64_t)15752017553340844820U, (uint64_t)4846430910634234874U,
-    (uint64_t)10861682798464583253U, (uint64_t)15404737222404363049U, (uint64_t)363586619281562022U,
-    (uint64_t)9866710912401645115U, (uint64_t)1162548847543228595U, (uint64_t)7649967190445130486U,
-    (uint64_t)5212340432230915749U, (uint64_t)7572620550182916491U, (uint64_t)14876145112448665096U,
-    (uint64_t)2063227348838176167U, (uint64_t)3519435548295415847U, (uint64_t)8390400282019023103U,
-    (uint64_t)17666843593163037841U, (uint64_t)9450204148816496323U, (uint64_t)8483374507652916768U,
-    (uint64_t)6254661047265818424U, (uint64_t)16382127809582285023U, (uint64_t)125359443771153172U,
-    (uint64_t)1374336701588437897U, (uint64_t)11362596098420127726U, (uint64_t)2101654420738681387U,
-    (uint64_t)12772780342444840510U, (uint64_t)12546934328908550060U,
-    (uint64_t)8331880412333790397U, (uint64_t)11687262051473819904U, (uint64_t)8926848496503457587U,
-    (uint64_t)9603974142010467857U, (uint64_t)13199952163826973175U, (uint64_t)2189856264898797734U,
-    (uint64_t)11356074861870267226U, (uint64_t)2027714896422561895U, (uint64_t)5261606367808050149U,
-    (uint64_t)153855954337762312U, (uint64_t)6375919692894573986U, (uint64_t)12364041207536146533U,
-    (uint64_t)1891896010455057160U, (uint64_t)1568123795087313171U, (uint64_t)18138710056556660101U,
-    (uint64_t)6004886947510047736U, (uint64_t)4811859325589542932U, (uint64_t)3618763430148954981U,
-    (uint64_t)11434521746258554122U, (uint64_t)10086341535864049427U,
-    (uint64_t)8073421629570399570U, (uint64_t)12680586148814729338U, (uint64_t)9619958020761569612U,
-    (uint64_t)15827203580658384478U, (uint64_t)12832694810937550406U,
-    (uint64_t)14977975484447400910U, (uint64_t)5478002389061063653U,
-    (uint64_t)14731136312639060880U, (uint64_t)4317867687275472033U, (uint64_t)6642650962855259884U,
-    (uint64_t)2514254944289495285U, (uint64_t)14231405641534478436U, (uint64_t)4045448346091518946U,
-    (uint64_t)8985477013445972471U, (uint64_t)8869039454457032149U, (uint64_t)4356978486208692970U,
-    (uint64_t)10805288613335538577U, (uint64_t)12832353127812502042U,
-    (uint64_t)4576590051676547490U, (uint64_t)6728053735138655107U, (uint64_t)17814206719173206184U,
-    (uint64_t)79790138573994940U, (uint64_t)17920293215101822267U, (uint64_t)13422026625585728864U,
-    (uint64_t)5018058010492547271U, (uint64_t)110232326023384102U, (uint64_t)10834264070056942976U,
-    (uint64_t)15222249086119088588U, (uint64_t)15119439519142044997U,
-    (uint64_t)11655511970063167313U, (uint64_t)1614477029450566107U, (uint64_t)3619322817271059794U,
-    (uint64_t)9352862040415412867U, (uint64_t)14017522553242747074U,
-    (uint64_t)13138513643674040327U, (uint64_t)3610195242889455765U, (uint64_t)8371069193996567291U,
-    (uint64_t)12670227996544662654U, (uint64_t)1205961025092146303U,
-    (uint64_t)13106709934003962112U, (uint64_t)4350113471327723407U,
-    (uint64_t)15060941403739680459U, (uint64_t)13639127647823205030U,
-    (uint64_t)10790943339357725715U, (uint64_t)498760574280648264U, (uint64_t)17922071907832082887U,
-    (uint64_t)15122670976670152145U, (uint64_t)6275027991110214322U, (uint64_t)7250912847491816402U,
-    (uint64_t)15206617260142982380U, (uint64_t)3385668313694152877U,
-    (uint64_t)17522479771766801905U, (uint64_t)2965919117476170655U, (uint64_t)1553238516603269404U,
-    (uint64_t)5820770015631050991U, (uint64_t)4999445222232605348U, (uint64_t)9245650860833717444U,
-    (uint64_t)1508811811724230728U, (uint64_t)5190684913765614385U, (uint64_t)15692927070934536166U,
-    (uint64_t)12981978499190500902U, (uint64_t)5143491963193394698U, (uint64_t)7705698092144084129U,
-    (uint64_t)581120653055084783U, (uint64_t)13886552864486459714U, (uint64_t)6290301270652587255U,
-    (uint64_t)8663431529954393128U, (uint64_t)17033405846475472443U, (uint64_t)5206780355442651635U,
-    (uint64_t)12580364474736467688U, (uint64_t)17934601912005283310U,
-    (uint64_t)15119491731028933652U, (uint64_t)17848231399859044858U,
-    (uint64_t)4427673319524919329U, (uint64_t)2673607337074368008U, (uint64_t)14034876464294699949U,
-    (uint64_t)10938948975420813697U, (uint64_t)15202340615298669183U,
-    (uint64_t)5496603454069431071U, (uint64_t)2486526142064906845U, (uint64_t)4507882119510526802U,
-    (uint64_t)13888151172411390059U, (uint64_t)15049027856908071726U,
-    (uint64_t)9667231543181973158U, (uint64_t)6406671575277563202U, (uint64_t)3395801050331215139U,
-    (uint64_t)9813607433539108308U, (uint64_t)2681417728820980381U, (uint64_t)18407064643927113994U,
-    (uint64_t)7707177692113485527U, (uint64_t)14218149384635317074U, (uint64_t)3658668346206375919U,
-    (uint64_t)15404713991002362166U, (uint64_t)10152074687696195207U,
-    (uint64_t)10926946599582128139U, (uint64_t)16907298600007085320U,
-    (uint64_t)16544287219664720279U, (uint64_t)11007075933432813205U,
-    (uint64_t)8652245965145713599U, (uint64_t)7857626748965990384U, (uint64_t)5602306604520095870U,
-    (uint64_t)2525139243938658618U, (uint64_t)14405696176872077447U,
-    (uint64_t)18432270482137885332U, (uint64_t)9913880809120071177U,
-    (uint64_t)16896141737831216972U, (uint64_t)7484791498211214829U,
-    (uint64_t)15635259968266497469U, (uint64_t)8495118537612215624U, (uint64_t)4915477980562575356U,
-    (uint64_t)16453519279754924350U, (uint64_t)14462108244565406969U,
-    (uint64_t)14837837755237096687U, (uint64_t)14130171078892575346U,
-    (uint64_t)15423793222528491497U, (uint64_t)5460399262075036084U,
-    (uint64_t)16085440580308415349U, (uint64_t)26873200736954488U, (uint64_t)5603655807457499550U,
-    (uint64_t)3342202915871129617U, (uint64_t)1604413932150236626U, (uint64_t)9684226585089458974U,
-    (uint64_t)1213229904006618539U, (uint64_t)6782978662408837236U, (uint64_t)11197029877749307372U,
-    (uint64_t)14085968786551657744U, (uint64_t)17352273610494009342U,
-    (uint64_t)7876582961192434984U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1499621593102562565ULL, 16692369783039433128ULL,
+    15337520135922861848ULL, 5455737214495366228ULL, 17827017231032529600ULL,
+    12413621606240782649ULL, 2290483008028286132ULL, 15752017553340844820ULL,
+    4846430910634234874ULL, 10861682798464583253ULL, 15404737222404363049ULL, 363586619281562022ULL,
+    9866710912401645115ULL, 1162548847543228595ULL, 7649967190445130486ULL, 5212340432230915749ULL,
+    7572620550182916491ULL, 14876145112448665096ULL, 2063227348838176167ULL, 3519435548295415847ULL,
+    8390400282019023103ULL, 17666843593163037841ULL, 9450204148816496323ULL, 8483374507652916768ULL,
+    6254661047265818424ULL, 16382127809582285023ULL, 125359443771153172ULL, 1374336701588437897ULL,
+    11362596098420127726ULL, 2101654420738681387ULL, 12772780342444840510ULL,
+    12546934328908550060ULL, 8331880412333790397ULL, 11687262051473819904ULL,
+    8926848496503457587ULL, 9603974142010467857ULL, 13199952163826973175ULL, 2189856264898797734ULL,
+    11356074861870267226ULL, 2027714896422561895ULL, 5261606367808050149ULL, 153855954337762312ULL,
+    6375919692894573986ULL, 12364041207536146533ULL, 1891896010455057160ULL, 1568123795087313171ULL,
+    18138710056556660101ULL, 6004886947510047736ULL, 4811859325589542932ULL, 3618763430148954981ULL,
+    11434521746258554122ULL, 10086341535864049427ULL, 8073421629570399570ULL,
+    12680586148814729338ULL, 9619958020761569612ULL, 15827203580658384478ULL,
+    12832694810937550406ULL, 14977975484447400910ULL, 5478002389061063653ULL,
+    14731136312639060880ULL, 4317867687275472033ULL, 6642650962855259884ULL, 2514254944289495285ULL,
+    14231405641534478436ULL, 4045448346091518946ULL, 8985477013445972471ULL, 8869039454457032149ULL,
+    4356978486208692970ULL, 10805288613335538577ULL, 12832353127812502042ULL,
+    4576590051676547490ULL, 6728053735138655107ULL, 17814206719173206184ULL, 79790138573994940ULL,
+    17920293215101822267ULL, 13422026625585728864ULL, 5018058010492547271ULL, 110232326023384102ULL,
+    10834264070056942976ULL, 15222249086119088588ULL, 15119439519142044997ULL,
+    11655511970063167313ULL, 1614477029450566107ULL, 3619322817271059794ULL, 9352862040415412867ULL,
+    14017522553242747074ULL, 13138513643674040327ULL, 3610195242889455765ULL,
+    8371069193996567291ULL, 12670227996544662654ULL, 1205961025092146303ULL,
+    13106709934003962112ULL, 4350113471327723407ULL, 15060941403739680459ULL,
+    13639127647823205030ULL, 10790943339357725715ULL, 498760574280648264ULL,
+    17922071907832082887ULL, 15122670976670152145ULL, 6275027991110214322ULL,
+    7250912847491816402ULL, 15206617260142982380ULL, 3385668313694152877ULL,
+    17522479771766801905ULL, 2965919117476170655ULL, 1553238516603269404ULL, 5820770015631050991ULL,
+    4999445222232605348ULL, 9245650860833717444ULL, 1508811811724230728ULL, 5190684913765614385ULL,
+    15692927070934536166ULL, 12981978499190500902ULL, 5143491963193394698ULL,
+    7705698092144084129ULL, 581120653055084783ULL, 13886552864486459714ULL, 6290301270652587255ULL,
+    8663431529954393128ULL, 17033405846475472443ULL, 5206780355442651635ULL,
+    12580364474736467688ULL, 17934601912005283310ULL, 15119491731028933652ULL,
+    17848231399859044858ULL, 4427673319524919329ULL, 2673607337074368008ULL,
+    14034876464294699949ULL, 10938948975420813697ULL, 15202340615298669183ULL,
+    5496603454069431071ULL, 2486526142064906845ULL, 4507882119510526802ULL, 13888151172411390059ULL,
+    15049027856908071726ULL, 9667231543181973158ULL, 6406671575277563202ULL, 3395801050331215139ULL,
+    9813607433539108308ULL, 2681417728820980381ULL, 18407064643927113994ULL, 7707177692113485527ULL,
+    14218149384635317074ULL, 3658668346206375919ULL, 15404713991002362166ULL,
+    10152074687696195207ULL, 10926946599582128139ULL, 16907298600007085320ULL,
+    16544287219664720279ULL, 11007075933432813205ULL, 8652245965145713599ULL,
+    7857626748965990384ULL, 5602306604520095870ULL, 2525139243938658618ULL, 14405696176872077447ULL,
+    18432270482137885332ULL, 9913880809120071177ULL, 16896141737831216972ULL,
+    7484791498211214829ULL, 15635259968266497469ULL, 8495118537612215624ULL, 4915477980562575356ULL,
+    16453519279754924350ULL, 14462108244565406969ULL, 14837837755237096687ULL,
+    14130171078892575346ULL, 15423793222528491497ULL, 5460399262075036084ULL,
+    16085440580308415349ULL, 26873200736954488ULL, 5603655807457499550ULL, 3342202915871129617ULL,
+    1604413932150236626ULL, 9684226585089458974ULL, 1213229904006618539ULL, 6782978662408837236ULL,
+    11197029877749307372ULL, 14085968786551657744ULL, 17352273610494009342ULL,
+    7876582961192434984ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_g_pow2_128_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)14619254753077084366U,
-    (uint64_t)13913835116514008593U, (uint64_t)15060744674088488145U,
-    (uint64_t)17668414598203068685U, (uint64_t)10761169236902342334U,
-    (uint64_t)15467027479157446221U, (uint64_t)14989185522423469618U,
-    (uint64_t)14354539272510107003U, (uint64_t)14298211796392133693U,
-    (uint64_t)13270323784253711450U, (uint64_t)13380964971965046957U,
-    (uint64_t)8686204248456909699U, (uint64_t)17434630286744937066U, (uint64_t)1355903775279084720U,
-    (uint64_t)7554695053550308662U, (uint64_t)11354971222741863570U, (uint64_t)564601613420749879U,
-    (uint64_t)8466325837259054896U, (uint64_t)10752965181772434263U,
-    (uint64_t)11405876547368426319U, (uint64_t)13791894568738930940U,
-    (uint64_t)8230587134406354675U, (uint64_t)12415514098722758608U,
-    (uint64_t)18414183046995786744U, (uint64_t)15508000368227372870U,
-    (uint64_t)5781062464627999307U, (uint64_t)15339429052219195590U,
-    (uint64_t)16038703753810741903U, (uint64_t)9587718938298980714U, (uint64_t)4822658817952386407U,
-    (uint64_t)1376351024833260660U, (uint64_t)1120174910554766702U, (uint64_t)1730170933262569274U,
-    (uint64_t)5187428548444533500U, (uint64_t)16242053503368957131U, (uint64_t)3036811119519868279U,
-    (uint64_t)1760267587958926638U, (uint64_t)170244572981065185U, (uint64_t)8063080791967388171U,
-    (uint64_t)4824892826607692737U, (uint64_t)16286391083472040552U,
-    (uint64_t)11945158615253358747U, (uint64_t)14096887760410224200U,
-    (uint64_t)1613720831904557039U, (uint64_t)14316966673761197523U,
-    (uint64_t)17411006201485445341U, (uint64_t)8112301506943158801U, (uint64_t)2069889233927989984U,
-    (uint64_t)10082848378277483927U, (uint64_t)3609691194454404430U, (uint64_t)6110437205371933689U,
-    (uint64_t)9769135977342231601U, (uint64_t)11977962151783386478U,
-    (uint64_t)18088718692559983573U, (uint64_t)11741637975753055U, (uint64_t)11110390325701582190U,
-    (uint64_t)1341402251566067019U, (uint64_t)3028229550849726478U, (uint64_t)10438984083997451310U,
-    (uint64_t)12730851885100145709U, (uint64_t)11524169532089894189U,
-    (uint64_t)4523375903229602674U, (uint64_t)2028602258037385622U, (uint64_t)17082839063089388410U,
-    (uint64_t)6103921364634113167U, (uint64_t)17066180888225306102U,
-    (uint64_t)11395680486707876195U, (uint64_t)10952892272443345484U,
-    (uint64_t)8792831960605859401U, (uint64_t)14194485427742325139U,
-    (uint64_t)15146020821144305250U, (uint64_t)1654766014957123343U, (uint64_t)7955526243090948551U,
-    (uint64_t)3989277566080493308U, (uint64_t)12229385116397931231U,
-    (uint64_t)13430548930727025562U, (uint64_t)3434892688179800602U, (uint64_t)8431998794645622027U,
-    (uint64_t)12132530981596299272U, (uint64_t)2289461608863966999U,
-    (uint64_t)18345870950201487179U, (uint64_t)13517947207801901576U,
-    (uint64_t)5213113244172561159U, (uint64_t)17632986594098340879U, (uint64_t)4405251818133148856U,
-    (uint64_t)11783009269435447793U, (uint64_t)9332138983770046035U,
-    (uint64_t)12863411548922539505U, (uint64_t)3717030292816178224U,
-    (uint64_t)10026078446427137374U, (uint64_t)11167295326594317220U,
-    (uint64_t)12425328773141588668U, (uint64_t)5760335125172049352U, (uint64_t)9016843701117277863U,
-    (uint64_t)5657892835694680172U, (uint64_t)11025130589305387464U, (uint64_t)1368484957977406173U,
-    (uint64_t)17361351345281258834U, (uint64_t)1907113641956152700U,
-    (uint64_t)16439233413531427752U, (uint64_t)5893322296986588932U,
-    (uint64_t)14000206906171746627U, (uint64_t)14979266987545792900U,
-    (uint64_t)6926291766898221120U, (uint64_t)7162023296083360752U, (uint64_t)14762747553625382529U,
-    (uint64_t)12610831658612406849U, (uint64_t)10462926899548715515U,
-    (uint64_t)4794017723140405312U, (uint64_t)5234438200490163319U, (uint64_t)8019519110339576320U,
-    (uint64_t)7194604241290530100U, (uint64_t)12626770134810813246U,
-    (uint64_t)10793074474236419890U, (uint64_t)11323224347913978783U,
-    (uint64_t)16831128015895380245U, (uint64_t)18323094195124693378U,
-    (uint64_t)2361097165281567692U, (uint64_t)15755578675014279498U,
-    (uint64_t)14289876470325854580U, (uint64_t)12856787656093616839U,
-    (uint64_t)3578928531243900594U, (uint64_t)3847532758790503699U, (uint64_t)8377953190224748743U,
-    (uint64_t)3314546646092744596U, (uint64_t)800810188859334358U, (uint64_t)4626344124229343596U,
-    (uint64_t)6620381605850876621U, (uint64_t)11422073570955989527U,
-    (uint64_t)12676813626484814469U, (uint64_t)16725029886764122240U,
-    (uint64_t)16648497372773830008U, (uint64_t)9135702594931291048U,
-    (uint64_t)16080949688826680333U, (uint64_t)11528096561346602947U,
-    (uint64_t)2632498067099740984U, (uint64_t)11583842699108800714U, (uint64_t)8378404864573610526U,
-    (uint64_t)1076560261627788534U, (uint64_t)13836015994325032828U,
-    (uint64_t)11234295937817067909U, (uint64_t)5893659808396722708U,
-    (uint64_t)11277421142886984364U, (uint64_t)8968549037166726491U,
-    (uint64_t)14841374331394032822U, (uint64_t)9967344773947889341U, (uint64_t)8799244393578496085U,
-    (uint64_t)5094686877301601410U, (uint64_t)8780316747074726862U, (uint64_t)9119697306829835718U,
-    (uint64_t)15381243327921855368U, (uint64_t)2686250164449435196U,
-    (uint64_t)16466917280442198358U, (uint64_t)13791704489163125216U,
-    (uint64_t)16955859337117924272U, (uint64_t)17112836394923783642U,
-    (uint64_t)4639176427338618063U, (uint64_t)16770029310141094964U,
-    (uint64_t)11049953922966416185U, (uint64_t)12012669590884098968U,
-    (uint64_t)4859326885929417214U, (uint64_t)896380084392586061U, (uint64_t)7153028362977034008U,
-    (uint64_t)10540021163316263301U, (uint64_t)9318277998512936585U,
-    (uint64_t)18344496977694796523U, (uint64_t)11374737400567645494U,
-    (uint64_t)17158800051138212954U, (uint64_t)18343197867863253153U,
-    (uint64_t)18204799297967861226U, (uint64_t)15798973531606348828U,
-    (uint64_t)9870158263408310459U, (uint64_t)17578869832774612627U, (uint64_t)8395748875822696932U,
-    (uint64_t)15310679007370670872U, (uint64_t)11205576736030808860U,
-    (uint64_t)10123429210002838967U, (uint64_t)5910544144088393959U,
-    (uint64_t)14016615653353687369U, (uint64_t)11191676704772957822U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 14619254753077084366ULL, 13913835116514008593ULL,
+    15060744674088488145ULL, 17668414598203068685ULL, 10761169236902342334ULL,
+    15467027479157446221ULL, 14989185522423469618ULL, 14354539272510107003ULL,
+    14298211796392133693ULL, 13270323784253711450ULL, 13380964971965046957ULL,
+    8686204248456909699ULL, 17434630286744937066ULL, 1355903775279084720ULL, 7554695053550308662ULL,
+    11354971222741863570ULL, 564601613420749879ULL, 8466325837259054896ULL, 10752965181772434263ULL,
+    11405876547368426319ULL, 13791894568738930940ULL, 8230587134406354675ULL,
+    12415514098722758608ULL, 18414183046995786744ULL, 15508000368227372870ULL,
+    5781062464627999307ULL, 15339429052219195590ULL, 16038703753810741903ULL,
+    9587718938298980714ULL, 4822658817952386407ULL, 1376351024833260660ULL, 1120174910554766702ULL,
+    1730170933262569274ULL, 5187428548444533500ULL, 16242053503368957131ULL, 3036811119519868279ULL,
+    1760267587958926638ULL, 170244572981065185ULL, 8063080791967388171ULL, 4824892826607692737ULL,
+    16286391083472040552ULL, 11945158615253358747ULL, 14096887760410224200ULL,
+    1613720831904557039ULL, 14316966673761197523ULL, 17411006201485445341ULL,
+    8112301506943158801ULL, 2069889233927989984ULL, 10082848378277483927ULL, 3609691194454404430ULL,
+    6110437205371933689ULL, 9769135977342231601ULL, 11977962151783386478ULL,
+    18088718692559983573ULL, 11741637975753055ULL, 11110390325701582190ULL, 1341402251566067019ULL,
+    3028229550849726478ULL, 10438984083997451310ULL, 12730851885100145709ULL,
+    11524169532089894189ULL, 4523375903229602674ULL, 2028602258037385622ULL,
+    17082839063089388410ULL, 6103921364634113167ULL, 17066180888225306102ULL,
+    11395680486707876195ULL, 10952892272443345484ULL, 8792831960605859401ULL,
+    14194485427742325139ULL, 15146020821144305250ULL, 1654766014957123343ULL,
+    7955526243090948551ULL, 3989277566080493308ULL, 12229385116397931231ULL,
+    13430548930727025562ULL, 3434892688179800602ULL, 8431998794645622027ULL,
+    12132530981596299272ULL, 2289461608863966999ULL, 18345870950201487179ULL,
+    13517947207801901576ULL, 5213113244172561159ULL, 17632986594098340879ULL,
+    4405251818133148856ULL, 11783009269435447793ULL, 9332138983770046035ULL,
+    12863411548922539505ULL, 3717030292816178224ULL, 10026078446427137374ULL,
+    11167295326594317220ULL, 12425328773141588668ULL, 5760335125172049352ULL,
+    9016843701117277863ULL, 5657892835694680172ULL, 11025130589305387464ULL, 1368484957977406173ULL,
+    17361351345281258834ULL, 1907113641956152700ULL, 16439233413531427752ULL,
+    5893322296986588932ULL, 14000206906171746627ULL, 14979266987545792900ULL,
+    6926291766898221120ULL, 7162023296083360752ULL, 14762747553625382529ULL,
+    12610831658612406849ULL, 10462926899548715515ULL, 4794017723140405312ULL,
+    5234438200490163319ULL, 8019519110339576320ULL, 7194604241290530100ULL, 12626770134810813246ULL,
+    10793074474236419890ULL, 11323224347913978783ULL, 16831128015895380245ULL,
+    18323094195124693378ULL, 2361097165281567692ULL, 15755578675014279498ULL,
+    14289876470325854580ULL, 12856787656093616839ULL, 3578928531243900594ULL,
+    3847532758790503699ULL, 8377953190224748743ULL, 3314546646092744596ULL, 800810188859334358ULL,
+    4626344124229343596ULL, 6620381605850876621ULL, 11422073570955989527ULL,
+    12676813626484814469ULL, 16725029886764122240ULL, 16648497372773830008ULL,
+    9135702594931291048ULL, 16080949688826680333ULL, 11528096561346602947ULL,
+    2632498067099740984ULL, 11583842699108800714ULL, 8378404864573610526ULL, 1076560261627788534ULL,
+    13836015994325032828ULL, 11234295937817067909ULL, 5893659808396722708ULL,
+    11277421142886984364ULL, 8968549037166726491ULL, 14841374331394032822ULL,
+    9967344773947889341ULL, 8799244393578496085ULL, 5094686877301601410ULL, 8780316747074726862ULL,
+    9119697306829835718ULL, 15381243327921855368ULL, 2686250164449435196ULL,
+    16466917280442198358ULL, 13791704489163125216ULL, 16955859337117924272ULL,
+    17112836394923783642ULL, 4639176427338618063ULL, 16770029310141094964ULL,
+    11049953922966416185ULL, 12012669590884098968ULL, 4859326885929417214ULL, 896380084392586061ULL,
+    7153028362977034008ULL, 10540021163316263301ULL, 9318277998512936585ULL,
+    18344496977694796523ULL, 11374737400567645494ULL, 17158800051138212954ULL,
+    18343197867863253153ULL, 18204799297967861226ULL, 15798973531606348828ULL,
+    9870158263408310459ULL, 17578869832774612627ULL, 8395748875822696932ULL,
+    15310679007370670872ULL, 11205576736030808860ULL, 10123429210002838967ULL,
+    5910544144088393959ULL, 14016615653353687369ULL, 11191676704772957822ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_g_pow2_192_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)7870395003430845958U,
-    (uint64_t)18001862936410067720U, (uint64_t)8006461232116967215U, (uint64_t)5921313779532424762U,
-    (uint64_t)10702113371959864307U, (uint64_t)8070517410642379879U, (uint64_t)7139806720777708306U,
-    (uint64_t)8253938546650739833U, (uint64_t)17490482834545705718U, (uint64_t)1065249776797037500U,
-    (uint64_t)5018258455937968775U, (uint64_t)14100621120178668337U, (uint64_t)8392845221328116213U,
-    (uint64_t)14630296398338540788U, (uint64_t)4268947906723414372U, (uint64_t)9231207002243517909U,
-    (uint64_t)14261219637616504262U, (uint64_t)7786881626982345356U,
-    (uint64_t)11412720751765882139U, (uint64_t)14119585051365330009U,
-    (uint64_t)15281626286521302128U, (uint64_t)6350171933454266732U,
-    (uint64_t)16559468304937127866U, (uint64_t)13200760478271693417U,
-    (uint64_t)6733381546280350776U, (uint64_t)3801404890075189193U, (uint64_t)2741036364686993903U,
-    (uint64_t)3218612940540174008U, (uint64_t)10894914335165419505U,
-    (uint64_t)11862941430149998362U, (uint64_t)4223151729402839584U, (uint64_t)2913215088487087887U,
-    (uint64_t)14562168920104952953U, (uint64_t)2170089393468287453U,
-    (uint64_t)10520900655016579352U, (uint64_t)7040362608949989273U, (uint64_t)8376510559381705307U,
-    (uint64_t)9142237200448131532U, (uint64_t)5696859948123854080U, (uint64_t)925422306716081180U,
-    (uint64_t)11155545953469186421U, (uint64_t)1888208646862572812U,
-    (uint64_t)11151095998248845721U, (uint64_t)15793503271680275267U,
-    (uint64_t)7729877044494854851U, (uint64_t)6235134673193032913U, (uint64_t)7364280682182401564U,
-    (uint64_t)5479679373325519985U, (uint64_t)17966037684582301763U,
-    (uint64_t)14140891609330279185U, (uint64_t)5814744449740463867U, (uint64_t)5652588426712591652U,
-    (uint64_t)774745682988690912U, (uint64_t)13228255573220500373U, (uint64_t)11949122068786859397U,
-    (uint64_t)8021166392900770376U, (uint64_t)7994323710948720063U, (uint64_t)9924618472877849977U,
-    (uint64_t)17618517523141194266U, (uint64_t)2750424097794401714U,
-    (uint64_t)15481749570715253207U, (uint64_t)14646964509921760497U,
-    (uint64_t)1037442848094301355U, (uint64_t)6295995947389299132U, (uint64_t)16915049722317579514U,
-    (uint64_t)10493877400992990313U, (uint64_t)18391008753060553521U, (uint64_t)483942209623707598U,
-    (uint64_t)2017775662838016613U, (uint64_t)5933251998459363553U, (uint64_t)11789135019970707407U,
-    (uint64_t)5484123723153268336U, (uint64_t)13246954648848484954U, (uint64_t)4774374393926023505U,
-    (uint64_t)14863995618704457336U, (uint64_t)13220153167104973625U,
-    (uint64_t)5988445485312390826U, (uint64_t)17580359464028944682U, (uint64_t)7297100131969874771U,
-    (uint64_t)379931507867989375U, (uint64_t)10927113096513421444U, (uint64_t)17688881974428340857U,
-    (uint64_t)4259872578781463333U, (uint64_t)8573076295966784472U, (uint64_t)16389829450727275032U,
-    (uint64_t)1667243868963568259U, (uint64_t)17730726848925960919U,
-    (uint64_t)11408899874569778008U, (uint64_t)3576527582023272268U,
-    (uint64_t)16492920640224231656U, (uint64_t)7906130545972460130U,
-    (uint64_t)13878604278207681266U, (uint64_t)41446695125652041U, (uint64_t)8891615271337333503U,
-    (uint64_t)2594537723613594470U, (uint64_t)7699579176995770924U, (uint64_t)147458463055730655U,
-    (uint64_t)12120406862739088406U, (uint64_t)12044892493010567063U,
-    (uint64_t)8554076749615475136U, (uint64_t)1005097692260929999U, (uint64_t)2687202654471188715U,
-    (uint64_t)9457588752176879209U, (uint64_t)17472884880062444019U, (uint64_t)9792097892056020166U,
-    (uint64_t)2525246678512797150U, (uint64_t)15958903035313115662U,
-    (uint64_t)11336038170342247032U, (uint64_t)11560342382835141123U,
-    (uint64_t)6212009033479929024U, (uint64_t)8214308203775021229U, (uint64_t)8475469210070503698U,
-    (uint64_t)13287024123485719563U, (uint64_t)12956951963817520723U,
-    (uint64_t)10693035819908470465U, (uint64_t)11375478788224786725U,
-    (uint64_t)16934625208487120398U, (uint64_t)10094585729115874495U,
-    (uint64_t)2763884524395905776U, (uint64_t)13535890148969964883U,
-    (uint64_t)13514657411765064358U, (uint64_t)9903074440788027562U,
-    (uint64_t)17324720726421199990U, (uint64_t)2273931039117368789U, (uint64_t)3442641041506157854U,
-    (uint64_t)1119853641236409612U, (uint64_t)12037070344296077989U, (uint64_t)581736433335671746U,
-    (uint64_t)6019150647054369174U, (uint64_t)14864096138068789375U, (uint64_t)6652995210998318662U,
-    (uint64_t)12773883697029175304U, (uint64_t)12751275631451845119U,
-    (uint64_t)11449095003038250478U, (uint64_t)1025805267334366480U, (uint64_t)2764432500300815015U,
-    (uint64_t)18274564429002844381U, (uint64_t)10445634195592600351U,
-    (uint64_t)11814099592837202735U, (uint64_t)5006796893679120289U, (uint64_t)6908397253997261914U,
-    (uint64_t)13266696965302879279U, (uint64_t)7768715053015037430U, (uint64_t)3569923738654785686U,
-    (uint64_t)5844853453464857549U, (uint64_t)1837340805629559110U, (uint64_t)1034657624388283114U,
-    (uint64_t)711244516069456460U, (uint64_t)12519286026957934814U, (uint64_t)2613464944620837619U,
-    (uint64_t)10003023321338286213U, (uint64_t)7291332092642881376U, (uint64_t)9832199564117004897U,
-    (uint64_t)3280736694860799890U, (uint64_t)6416452202849179874U, (uint64_t)7326961381798642069U,
-    (uint64_t)8435688798040635029U, (uint64_t)16630141263910982958U,
-    (uint64_t)17222635514422533318U, (uint64_t)9482787389178881499U, (uint64_t)836561194658263905U,
-    (uint64_t)3405319043337616649U, (uint64_t)2786146577568026518U, (uint64_t)7625483685691626321U,
-    (uint64_t)6728084875304656716U, (uint64_t)1140997959232544268U, (uint64_t)12847384827606303792U,
-    (uint64_t)1719121337754572070U, (uint64_t)12863589482936438532U, (uint64_t)3880712899640530862U,
-    (uint64_t)2748456882813671564U, (uint64_t)4775988900044623019U, (uint64_t)8937847374382191162U,
-    (uint64_t)3767367347172252295U, (uint64_t)13468672401049388646U,
-    (uint64_t)14359032216842397576U, (uint64_t)2002555958685443975U,
-    (uint64_t)16488678606651526810U, (uint64_t)11826135409597474760U,
-    (uint64_t)15296495673182508601U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 7870395003430845958ULL, 18001862936410067720ULL, 8006461232116967215ULL,
+    5921313779532424762ULL, 10702113371959864307ULL, 8070517410642379879ULL, 7139806720777708306ULL,
+    8253938546650739833ULL, 17490482834545705718ULL, 1065249776797037500ULL, 5018258455937968775ULL,
+    14100621120178668337ULL, 8392845221328116213ULL, 14630296398338540788ULL,
+    4268947906723414372ULL, 9231207002243517909ULL, 14261219637616504262ULL, 7786881626982345356ULL,
+    11412720751765882139ULL, 14119585051365330009ULL, 15281626286521302128ULL,
+    6350171933454266732ULL, 16559468304937127866ULL, 13200760478271693417ULL,
+    6733381546280350776ULL, 3801404890075189193ULL, 2741036364686993903ULL, 3218612940540174008ULL,
+    10894914335165419505ULL, 11862941430149998362ULL, 4223151729402839584ULL,
+    2913215088487087887ULL, 14562168920104952953ULL, 2170089393468287453ULL,
+    10520900655016579352ULL, 7040362608949989273ULL, 8376510559381705307ULL, 9142237200448131532ULL,
+    5696859948123854080ULL, 925422306716081180ULL, 11155545953469186421ULL, 1888208646862572812ULL,
+    11151095998248845721ULL, 15793503271680275267ULL, 7729877044494854851ULL,
+    6235134673193032913ULL, 7364280682182401564ULL, 5479679373325519985ULL, 17966037684582301763ULL,
+    14140891609330279185ULL, 5814744449740463867ULL, 5652588426712591652ULL, 774745682988690912ULL,
+    13228255573220500373ULL, 11949122068786859397ULL, 8021166392900770376ULL,
+    7994323710948720063ULL, 9924618472877849977ULL, 17618517523141194266ULL, 2750424097794401714ULL,
+    15481749570715253207ULL, 14646964509921760497ULL, 1037442848094301355ULL,
+    6295995947389299132ULL, 16915049722317579514ULL, 10493877400992990313ULL,
+    18391008753060553521ULL, 483942209623707598ULL, 2017775662838016613ULL, 5933251998459363553ULL,
+    11789135019970707407ULL, 5484123723153268336ULL, 13246954648848484954ULL,
+    4774374393926023505ULL, 14863995618704457336ULL, 13220153167104973625ULL,
+    5988445485312390826ULL, 17580359464028944682ULL, 7297100131969874771ULL, 379931507867989375ULL,
+    10927113096513421444ULL, 17688881974428340857ULL, 4259872578781463333ULL,
+    8573076295966784472ULL, 16389829450727275032ULL, 1667243868963568259ULL,
+    17730726848925960919ULL, 11408899874569778008ULL, 3576527582023272268ULL,
+    16492920640224231656ULL, 7906130545972460130ULL, 13878604278207681266ULL, 41446695125652041ULL,
+    8891615271337333503ULL, 2594537723613594470ULL, 7699579176995770924ULL, 147458463055730655ULL,
+    12120406862739088406ULL, 12044892493010567063ULL, 8554076749615475136ULL,
+    1005097692260929999ULL, 2687202654471188715ULL, 9457588752176879209ULL, 17472884880062444019ULL,
+    9792097892056020166ULL, 2525246678512797150ULL, 15958903035313115662ULL,
+    11336038170342247032ULL, 11560342382835141123ULL, 6212009033479929024ULL,
+    8214308203775021229ULL, 8475469210070503698ULL, 13287024123485719563ULL,
+    12956951963817520723ULL, 10693035819908470465ULL, 11375478788224786725ULL,
+    16934625208487120398ULL, 10094585729115874495ULL, 2763884524395905776ULL,
+    13535890148969964883ULL, 13514657411765064358ULL, 9903074440788027562ULL,
+    17324720726421199990ULL, 2273931039117368789ULL, 3442641041506157854ULL, 1119853641236409612ULL,
+    12037070344296077989ULL, 581736433335671746ULL, 6019150647054369174ULL, 14864096138068789375ULL,
+    6652995210998318662ULL, 12773883697029175304ULL, 12751275631451845119ULL,
+    11449095003038250478ULL, 1025805267334366480ULL, 2764432500300815015ULL,
+    18274564429002844381ULL, 10445634195592600351ULL, 11814099592837202735ULL,
+    5006796893679120289ULL, 6908397253997261914ULL, 13266696965302879279ULL, 7768715053015037430ULL,
+    3569923738654785686ULL, 5844853453464857549ULL, 1837340805629559110ULL, 1034657624388283114ULL,
+    711244516069456460ULL, 12519286026957934814ULL, 2613464944620837619ULL, 10003023321338286213ULL,
+    7291332092642881376ULL, 9832199564117004897ULL, 3280736694860799890ULL, 6416452202849179874ULL,
+    7326961381798642069ULL, 8435688798040635029ULL, 16630141263910982958ULL,
+    17222635514422533318ULL, 9482787389178881499ULL, 836561194658263905ULL, 3405319043337616649ULL,
+    2786146577568026518ULL, 7625483685691626321ULL, 6728084875304656716ULL, 1140997959232544268ULL,
+    12847384827606303792ULL, 1719121337754572070ULL, 12863589482936438532ULL,
+    3880712899640530862ULL, 2748456882813671564ULL, 4775988900044623019ULL, 8937847374382191162ULL,
+    3767367347172252295ULL, 13468672401049388646ULL, 14359032216842397576ULL,
+    2002555958685443975ULL, 16488678606651526810ULL, 11826135409597474760ULL,
+    15296495673182508601ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_basepoint_table_w5[384U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)8784043285714375740U,
-    (uint64_t)8483257759279461889U, (uint64_t)8789745728267363600U, (uint64_t)1770019616739251654U,
-    (uint64_t)15992936863339206154U, (uint64_t)10037038012062884956U,
-    (uint64_t)15197544864945402661U, (uint64_t)9615747158586711429U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)10634854829044225757U, (uint64_t)351552716085025155U, (uint64_t)10645315080955407736U,
-    (uint64_t)3609262091244858135U, (uint64_t)15760741698986874125U,
-    (uint64_t)14936374388219697827U, (uint64_t)15751360096993017895U,
-    (uint64_t)18012233706239762398U, (uint64_t)1993877568177495041U,
-    (uint64_t)10345888787846536528U, (uint64_t)7746511691117935375U,
-    (uint64_t)14517043990409914413U, (uint64_t)14122549297570634151U,
-    (uint64_t)16934610359517083771U, (uint64_t)5724511325497097418U, (uint64_t)8983432969107448705U,
-    (uint64_t)2687429970334080245U, (uint64_t)16525396802810050288U, (uint64_t)7602596488871585854U,
-    (uint64_t)4813919589149203084U, (uint64_t)7680395813780804519U, (uint64_t)6687709583048023590U,
-    (uint64_t)18086445169104142027U, (uint64_t)9637814708330203929U,
-    (uint64_t)14785108459960679090U, (uint64_t)3838023279095023581U, (uint64_t)3555615526157830307U,
-    (uint64_t)5177066488380472871U, (uint64_t)18218186719108038403U,
-    (uint64_t)16281556341699656105U, (uint64_t)1524227924561461191U, (uint64_t)4148060517641909597U,
-    (uint64_t)2858290374115363433U, (uint64_t)8942772026334130620U, (uint64_t)3034451298319885113U,
-    (uint64_t)8447866036736640940U, (uint64_t)11204933433076256578U,
-    (uint64_t)18333595740249588297U, (uint64_t)8259597024804538246U, (uint64_t)9539734295777539786U,
-    (uint64_t)9797290423046626413U, (uint64_t)5777303437849646537U, (uint64_t)8739356909899132020U,
-    (uint64_t)14815960973766782158U, (uint64_t)15286581798204509801U,
-    (uint64_t)17597362577777019682U, (uint64_t)13259283710820519742U,
-    (uint64_t)10501322996899164670U, (uint64_t)1221138904338319642U,
-    (uint64_t)14586685489551951885U, (uint64_t)895326705426031212U, (uint64_t)14398171728560617847U,
-    (uint64_t)9592550823745097391U, (uint64_t)17240998489162206026U, (uint64_t)8085479283308189196U,
-    (uint64_t)14844657737893882826U, (uint64_t)15923425394150618234U,
-    (uint64_t)2997808084773249525U, (uint64_t)494323555453660587U, (uint64_t)1215695327517794764U,
-    (uint64_t)9476207381098391690U, (uint64_t)7480789678419122995U, (uint64_t)15212230329321082489U,
-    (uint64_t)436189395349576388U, (uint64_t)17377474396456660834U, (uint64_t)15237013929655017939U,
-    (uint64_t)11444428846883781676U, (uint64_t)5112749694521428575U, (uint64_t)950829367509872073U,
-    (uint64_t)17665036182057559519U, (uint64_t)17205133339690002313U,
-    (uint64_t)16233765170251334549U, (uint64_t)10122775683257972591U,
-    (uint64_t)3352514236455632420U, (uint64_t)9143148522359954691U, (uint64_t)601191684005658860U,
-    (uint64_t)13398772186646349998U, (uint64_t)15512696600132928431U,
-    (uint64_t)9128416073728948653U, (uint64_t)11233051033546138578U, (uint64_t)6769345682610122833U,
-    (uint64_t)10823233224575054288U, (uint64_t)9997725227559980175U, (uint64_t)6733425642852897415U,
-    (uint64_t)16302206918151466066U, (uint64_t)1669330822143265921U, (uint64_t)2661645605036546002U,
-    (uint64_t)17182558479745802165U, (uint64_t)1165082692376932040U, (uint64_t)9470595929011488359U,
-    (uint64_t)6142147329285324932U, (uint64_t)4829075085998111287U, (uint64_t)10231370681107338930U,
-    (uint64_t)9591876895322495239U, (uint64_t)10316468561384076618U,
-    (uint64_t)11592503647238064235U, (uint64_t)13395813606055179632U, (uint64_t)511127033980815508U,
-    (uint64_t)12434976573147649880U, (uint64_t)3425094795384359127U, (uint64_t)6816971736303023445U,
-    (uint64_t)15444670609021139344U, (uint64_t)9464349818322082360U,
-    (uint64_t)16178216413042376883U, (uint64_t)9595540370774317348U, (uint64_t)7229365182662875710U,
-    (uint64_t)4601177649460012843U, (uint64_t)5455046447382487090U, (uint64_t)10854066421606187521U,
-    (uint64_t)15913416821879788071U, (uint64_t)2297365362023460173U, (uint64_t)2603252216454941350U,
-    (uint64_t)6768791943870490934U, (uint64_t)15705936687122754810U, (uint64_t)9537096567546600694U,
-    (uint64_t)17580538144855035062U, (uint64_t)4496542856965746638U, (uint64_t)8444341625922124942U,
-    (uint64_t)12191263903636183168U, (uint64_t)17427332907535974165U,
-    (uint64_t)14307569739254103736U, (uint64_t)13900598742063266169U,
-    (uint64_t)7176996424355977650U, (uint64_t)5709008170379717479U, (uint64_t)14471312052264549092U,
-    (uint64_t)1464519909491759867U, (uint64_t)3328154641049602121U, (uint64_t)13020349337171136774U,
-    (uint64_t)2772166279972051938U, (uint64_t)10854476939425975292U, (uint64_t)1967189930534630940U,
-    (uint64_t)2802919076529341959U, (uint64_t)14792226094833519208U,
-    (uint64_t)14675640928566522177U, (uint64_t)14838974364643800837U,
-    (uint64_t)17631460696099549980U, (uint64_t)17434186275364935469U,
-    (uint64_t)2665648200587705473U, (uint64_t)13202122464492564051U, (uint64_t)7576287350918073341U,
-    (uint64_t)2272206013910186424U, (uint64_t)14558761641743937843U, (uint64_t)5675729149929979729U,
-    (uint64_t)9043135187561613166U, (uint64_t)11750149293830589225U, (uint64_t)740555197954307911U,
-    (uint64_t)9871738005087190699U, (uint64_t)17178667634283502053U,
-    (uint64_t)18046255991533013265U, (uint64_t)4458222096988430430U, (uint64_t)8452427758526311627U,
-    (uint64_t)13825286929656615266U, (uint64_t)13956286357198391218U,
-    (uint64_t)15875692916799995079U, (uint64_t)10634895319157013920U,
-    (uint64_t)13230116118036304207U, (uint64_t)8795317393614625606U, (uint64_t)7001710806858862020U,
-    (uint64_t)7949746088586183478U, (uint64_t)14677556044923602317U,
-    (uint64_t)11184023437485843904U, (uint64_t)11215864722023085094U,
-    (uint64_t)6444464081471519014U, (uint64_t)1706241174022415217U, (uint64_t)8243975633057550613U,
-    (uint64_t)15502902453836085864U, (uint64_t)3799182188594003953U, (uint64_t)3538840175098724094U,
-    (uint64_t)13240193491554624643U, (uint64_t)12365034249541329920U,
-    (uint64_t)2924326828590977357U, (uint64_t)5687195797140589099U, (uint64_t)16880427227292834531U,
-    (uint64_t)9691471435758991112U, (uint64_t)16642385273732487288U,
-    (uint64_t)12173806747523009914U, (uint64_t)13142722756877876849U,
-    (uint64_t)8370377548305121979U, (uint64_t)17988526053752025426U, (uint64_t)4818750752684100334U,
-    (uint64_t)5669241919350361655U, (uint64_t)4964810303238518540U, (uint64_t)16709712747671533191U,
-    (uint64_t)4461414404267448242U, (uint64_t)3971798785139504238U, (uint64_t)6276818948740422136U,
-    (uint64_t)1426735892164275762U, (uint64_t)7943622674892418919U, (uint64_t)9864274225563929680U,
-    (uint64_t)57815533745003233U, (uint64_t)10893588105168960233U, (uint64_t)15739162732907069535U,
-    (uint64_t)3923866849462073470U, (uint64_t)12279826158399226875U, (uint64_t)1533015761334846582U,
-    (uint64_t)15860156818568437510U, (uint64_t)8252625373831297988U, (uint64_t)9666953804812706358U,
-    (uint64_t)8767785238646914634U, (uint64_t)14382179044941403551U,
-    (uint64_t)10401039907264254245U, (uint64_t)8584860003763157350U, (uint64_t)3120462679504470266U,
-    (uint64_t)8670255778748340069U, (uint64_t)5313789577940369984U, (uint64_t)16977072364454789224U,
-    (uint64_t)12199578693972188324U, (uint64_t)18211098771672599237U,
-    (uint64_t)12868831556008795030U, (uint64_t)5310155061431048194U,
-    (uint64_t)18114153238435112606U, (uint64_t)14482365809278304512U,
-    (uint64_t)12520721662723001511U, (uint64_t)405943624021143002U, (uint64_t)8146944101507657423U,
-    (uint64_t)181739317780393495U, (uint64_t)81743892273670099U, (uint64_t)14759561962550473930U,
-    (uint64_t)4592623849546992939U, (uint64_t)6916440441743449719U, (uint64_t)1304610503530809833U,
-    (uint64_t)5464930909232486441U, (uint64_t)15414883617496224671U, (uint64_t)8129283345256790U,
-    (uint64_t)18294252198413739489U, (uint64_t)17394115281884857288U,
-    (uint64_t)7808348415224731235U, (uint64_t)13195566655747230608U, (uint64_t)8568194219353949094U,
-    (uint64_t)15329813048672122440U, (uint64_t)9604275495885785744U, (uint64_t)1577712551205219835U,
-    (uint64_t)15964209008022052790U, (uint64_t)15087297920782098160U,
-    (uint64_t)3946031512438511898U, (uint64_t)10050061168984440631U,
-    (uint64_t)11382452014533138316U, (uint64_t)6313670788911952792U,
-    (uint64_t)12015989229696164014U, (uint64_t)5946702628076168852U, (uint64_t)5219995658774362841U,
-    (uint64_t)12230141881068377972U, (uint64_t)12361195202673441956U,
-    (uint64_t)4732862275653856711U, (uint64_t)17221430380805252370U,
-    (uint64_t)15397525953897375810U, (uint64_t)16557437297239563045U,
-    (uint64_t)10101683801868971351U, (uint64_t)1402611372245592868U, (uint64_t)1931806383735563658U,
-    (uint64_t)10991705207471512479U, (uint64_t)861333583207471392U, (uint64_t)15207766844626322355U,
-    (uint64_t)9224628129811432393U, (uint64_t)3497069567089055613U, (uint64_t)11956632757898590316U,
-    (uint64_t)8733729372586312960U, (uint64_t)18091521051714930927U, (uint64_t)77582787724373283U,
-    (uint64_t)9922437373519669237U, (uint64_t)3079321456325704615U, (uint64_t)12171198408512478457U,
-    (uint64_t)17179130884012147596U, (uint64_t)6839115479620367181U, (uint64_t)4421032569964105406U,
-    (uint64_t)10353331468657256053U, (uint64_t)17400988720335968824U,
-    (uint64_t)17138855889417480540U, (uint64_t)4507980080381370611U,
-    (uint64_t)10703175719793781886U, (uint64_t)12598516658725890426U,
-    (uint64_t)8353463412173898932U, (uint64_t)17703029389228422404U, (uint64_t)9313111267107226233U,
-    (uint64_t)5441322942995154196U, (uint64_t)8952817660034465484U, (uint64_t)17571113341183703118U,
-    (uint64_t)7375087953801067019U, (uint64_t)13381466302076453648U, (uint64_t)3218165271423914596U,
-    (uint64_t)16956372157249382685U, (uint64_t)509080090049418841U, (uint64_t)13374233893294084913U,
-    (uint64_t)2988537624204297086U, (uint64_t)4979195832939384620U, (uint64_t)3803931594068976394U,
-    (uint64_t)10731535883829627646U, (uint64_t)12954845047607194278U,
-    (uint64_t)10494298062560667399U, (uint64_t)4967351022190213065U,
-    (uint64_t)13391917938145756456U, (uint64_t)951370484866918160U, (uint64_t)13531334179067685307U,
-    (uint64_t)12868421357919390599U, (uint64_t)15918857042998130258U,
-    (uint64_t)17769743831936974016U, (uint64_t)7137921979260368809U,
-    (uint64_t)12461369180685892062U, (uint64_t)827476514081935199U, (uint64_t)15107282134224767230U,
-    (uint64_t)10084765752802805748U, (uint64_t)3303739059392464407U,
-    (uint64_t)17859532612136591428U, (uint64_t)10949414770405040164U,
-    (uint64_t)12838613589371008785U, (uint64_t)5554397169231540728U,
-    (uint64_t)18375114572169624408U, (uint64_t)15649286703242390139U,
-    (uint64_t)2957281557463706877U, (uint64_t)14000350446219393213U,
-    (uint64_t)14355199721749620351U, (uint64_t)2730856240099299695U,
-    (uint64_t)17528131000714705752U, (uint64_t)2537498525883536360U, (uint64_t)6121058967084509393U,
-    (uint64_t)16897667060435514221U, (uint64_t)12367869599571112440U,
-    (uint64_t)3388831797050807508U, (uint64_t)16791449724090982798U, (uint64_t)2673426123453294928U,
-    (uint64_t)11369313542384405846U, (uint64_t)15641960333586432634U,
-    (uint64_t)15080962589658958379U, (uint64_t)7747943772340226569U, (uint64_t)8075023376199159152U,
-    (uint64_t)8485093027378306528U, (uint64_t)13503706844122243648U, (uint64_t)8401961362938086226U,
-    (uint64_t)8125426002124226402U, (uint64_t)9005399361407785203U, (uint64_t)6847968030066906634U,
-    (uint64_t)11934937736309295197U, (uint64_t)5116750888594772351U, (uint64_t)2817039227179245227U,
-    (uint64_t)17724206901239332980U, (uint64_t)4985702708254058578U, (uint64_t)5786345435756642871U,
-    (uint64_t)17772527414940936938U, (uint64_t)1201320251272957006U,
-    (uint64_t)15787430120324348129U, (uint64_t)6305488781359965661U,
-    (uint64_t)12423900845502858433U, (uint64_t)17485949424202277720U,
-    (uint64_t)2062237315546855852U, (uint64_t)10353639467860902375U, (uint64_t)2315398490451287299U,
-    (uint64_t)15394572894814882621U, (uint64_t)232866113801165640U, (uint64_t)7413443736109338926U,
-    (uint64_t)902719806551551191U, (uint64_t)16568853118619045174U, (uint64_t)14202214862428279177U,
-    (uint64_t)11719595395278861192U, (uint64_t)5890053236389907647U, (uint64_t)9996196494965833627U,
-    (uint64_t)12967056942364782577U, (uint64_t)9034128755157395787U,
-    (uint64_t)17898204904710512655U, (uint64_t)8229373445062993977U,
-    (uint64_t)13580036169519833644U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 8784043285714375740ULL, 8483257759279461889ULL, 8789745728267363600ULL,
+    1770019616739251654ULL, 15992936863339206154ULL, 10037038012062884956ULL,
+    15197544864945402661ULL, 9615747158586711429ULL, 1ULL, 18446744069414584320ULL,
+    18446744073709551615ULL, 4294967294ULL, 10634854829044225757ULL, 351552716085025155ULL,
+    10645315080955407736ULL, 3609262091244858135ULL, 15760741698986874125ULL,
+    14936374388219697827ULL, 15751360096993017895ULL, 18012233706239762398ULL,
+    1993877568177495041ULL, 10345888787846536528ULL, 7746511691117935375ULL,
+    14517043990409914413ULL, 14122549297570634151ULL, 16934610359517083771ULL,
+    5724511325497097418ULL, 8983432969107448705ULL, 2687429970334080245ULL, 16525396802810050288ULL,
+    7602596488871585854ULL, 4813919589149203084ULL, 7680395813780804519ULL, 6687709583048023590ULL,
+    18086445169104142027ULL, 9637814708330203929ULL, 14785108459960679090ULL,
+    3838023279095023581ULL, 3555615526157830307ULL, 5177066488380472871ULL, 18218186719108038403ULL,
+    16281556341699656105ULL, 1524227924561461191ULL, 4148060517641909597ULL, 2858290374115363433ULL,
+    8942772026334130620ULL, 3034451298319885113ULL, 8447866036736640940ULL, 11204933433076256578ULL,
+    18333595740249588297ULL, 8259597024804538246ULL, 9539734295777539786ULL, 9797290423046626413ULL,
+    5777303437849646537ULL, 8739356909899132020ULL, 14815960973766782158ULL,
+    15286581798204509801ULL, 17597362577777019682ULL, 13259283710820519742ULL,
+    10501322996899164670ULL, 1221138904338319642ULL, 14586685489551951885ULL, 895326705426031212ULL,
+    14398171728560617847ULL, 9592550823745097391ULL, 17240998489162206026ULL,
+    8085479283308189196ULL, 14844657737893882826ULL, 15923425394150618234ULL,
+    2997808084773249525ULL, 494323555453660587ULL, 1215695327517794764ULL, 9476207381098391690ULL,
+    7480789678419122995ULL, 15212230329321082489ULL, 436189395349576388ULL, 17377474396456660834ULL,
+    15237013929655017939ULL, 11444428846883781676ULL, 5112749694521428575ULL, 950829367509872073ULL,
+    17665036182057559519ULL, 17205133339690002313ULL, 16233765170251334549ULL,
+    10122775683257972591ULL, 3352514236455632420ULL, 9143148522359954691ULL, 601191684005658860ULL,
+    13398772186646349998ULL, 15512696600132928431ULL, 9128416073728948653ULL,
+    11233051033546138578ULL, 6769345682610122833ULL, 10823233224575054288ULL,
+    9997725227559980175ULL, 6733425642852897415ULL, 16302206918151466066ULL, 1669330822143265921ULL,
+    2661645605036546002ULL, 17182558479745802165ULL, 1165082692376932040ULL, 9470595929011488359ULL,
+    6142147329285324932ULL, 4829075085998111287ULL, 10231370681107338930ULL, 9591876895322495239ULL,
+    10316468561384076618ULL, 11592503647238064235ULL, 13395813606055179632ULL,
+    511127033980815508ULL, 12434976573147649880ULL, 3425094795384359127ULL, 6816971736303023445ULL,
+    15444670609021139344ULL, 9464349818322082360ULL, 16178216413042376883ULL,
+    9595540370774317348ULL, 7229365182662875710ULL, 4601177649460012843ULL, 5455046447382487090ULL,
+    10854066421606187521ULL, 15913416821879788071ULL, 2297365362023460173ULL,
+    2603252216454941350ULL, 6768791943870490934ULL, 15705936687122754810ULL, 9537096567546600694ULL,
+    17580538144855035062ULL, 4496542856965746638ULL, 8444341625922124942ULL,
+    12191263903636183168ULL, 17427332907535974165ULL, 14307569739254103736ULL,
+    13900598742063266169ULL, 7176996424355977650ULL, 5709008170379717479ULL,
+    14471312052264549092ULL, 1464519909491759867ULL, 3328154641049602121ULL,
+    13020349337171136774ULL, 2772166279972051938ULL, 10854476939425975292ULL,
+    1967189930534630940ULL, 2802919076529341959ULL, 14792226094833519208ULL,
+    14675640928566522177ULL, 14838974364643800837ULL, 17631460696099549980ULL,
+    17434186275364935469ULL, 2665648200587705473ULL, 13202122464492564051ULL,
+    7576287350918073341ULL, 2272206013910186424ULL, 14558761641743937843ULL, 5675729149929979729ULL,
+    9043135187561613166ULL, 11750149293830589225ULL, 740555197954307911ULL, 9871738005087190699ULL,
+    17178667634283502053ULL, 18046255991533013265ULL, 4458222096988430430ULL,
+    8452427758526311627ULL, 13825286929656615266ULL, 13956286357198391218ULL,
+    15875692916799995079ULL, 10634895319157013920ULL, 13230116118036304207ULL,
+    8795317393614625606ULL, 7001710806858862020ULL, 7949746088586183478ULL, 14677556044923602317ULL,
+    11184023437485843904ULL, 11215864722023085094ULL, 6444464081471519014ULL,
+    1706241174022415217ULL, 8243975633057550613ULL, 15502902453836085864ULL, 3799182188594003953ULL,
+    3538840175098724094ULL, 13240193491554624643ULL, 12365034249541329920ULL,
+    2924326828590977357ULL, 5687195797140589099ULL, 16880427227292834531ULL, 9691471435758991112ULL,
+    16642385273732487288ULL, 12173806747523009914ULL, 13142722756877876849ULL,
+    8370377548305121979ULL, 17988526053752025426ULL, 4818750752684100334ULL, 5669241919350361655ULL,
+    4964810303238518540ULL, 16709712747671533191ULL, 4461414404267448242ULL, 3971798785139504238ULL,
+    6276818948740422136ULL, 1426735892164275762ULL, 7943622674892418919ULL, 9864274225563929680ULL,
+    57815533745003233ULL, 10893588105168960233ULL, 15739162732907069535ULL, 3923866849462073470ULL,
+    12279826158399226875ULL, 1533015761334846582ULL, 15860156818568437510ULL,
+    8252625373831297988ULL, 9666953804812706358ULL, 8767785238646914634ULL, 14382179044941403551ULL,
+    10401039907264254245ULL, 8584860003763157350ULL, 3120462679504470266ULL, 8670255778748340069ULL,
+    5313789577940369984ULL, 16977072364454789224ULL, 12199578693972188324ULL,
+    18211098771672599237ULL, 12868831556008795030ULL, 5310155061431048194ULL,
+    18114153238435112606ULL, 14482365809278304512ULL, 12520721662723001511ULL,
+    405943624021143002ULL, 8146944101507657423ULL, 181739317780393495ULL, 81743892273670099ULL,
+    14759561962550473930ULL, 4592623849546992939ULL, 6916440441743449719ULL, 1304610503530809833ULL,
+    5464930909232486441ULL, 15414883617496224671ULL, 8129283345256790ULL, 18294252198413739489ULL,
+    17394115281884857288ULL, 7808348415224731235ULL, 13195566655747230608ULL,
+    8568194219353949094ULL, 15329813048672122440ULL, 9604275495885785744ULL, 1577712551205219835ULL,
+    15964209008022052790ULL, 15087297920782098160ULL, 3946031512438511898ULL,
+    10050061168984440631ULL, 11382452014533138316ULL, 6313670788911952792ULL,
+    12015989229696164014ULL, 5946702628076168852ULL, 5219995658774362841ULL,
+    12230141881068377972ULL, 12361195202673441956ULL, 4732862275653856711ULL,
+    17221430380805252370ULL, 15397525953897375810ULL, 16557437297239563045ULL,
+    10101683801868971351ULL, 1402611372245592868ULL, 1931806383735563658ULL,
+    10991705207471512479ULL, 861333583207471392ULL, 15207766844626322355ULL, 9224628129811432393ULL,
+    3497069567089055613ULL, 11956632757898590316ULL, 8733729372586312960ULL,
+    18091521051714930927ULL, 77582787724373283ULL, 9922437373519669237ULL, 3079321456325704615ULL,
+    12171198408512478457ULL, 17179130884012147596ULL, 6839115479620367181ULL,
+    4421032569964105406ULL, 10353331468657256053ULL, 17400988720335968824ULL,
+    17138855889417480540ULL, 4507980080381370611ULL, 10703175719793781886ULL,
+    12598516658725890426ULL, 8353463412173898932ULL, 17703029389228422404ULL,
+    9313111267107226233ULL, 5441322942995154196ULL, 8952817660034465484ULL, 17571113341183703118ULL,
+    7375087953801067019ULL, 13381466302076453648ULL, 3218165271423914596ULL,
+    16956372157249382685ULL, 509080090049418841ULL, 13374233893294084913ULL, 2988537624204297086ULL,
+    4979195832939384620ULL, 3803931594068976394ULL, 10731535883829627646ULL,
+    12954845047607194278ULL, 10494298062560667399ULL, 4967351022190213065ULL,
+    13391917938145756456ULL, 951370484866918160ULL, 13531334179067685307ULL,
+    12868421357919390599ULL, 15918857042998130258ULL, 17769743831936974016ULL,
+    7137921979260368809ULL, 12461369180685892062ULL, 827476514081935199ULL, 15107282134224767230ULL,
+    10084765752802805748ULL, 3303739059392464407ULL, 17859532612136591428ULL,
+    10949414770405040164ULL, 12838613589371008785ULL, 5554397169231540728ULL,
+    18375114572169624408ULL, 15649286703242390139ULL, 2957281557463706877ULL,
+    14000350446219393213ULL, 14355199721749620351ULL, 2730856240099299695ULL,
+    17528131000714705752ULL, 2537498525883536360ULL, 6121058967084509393ULL,
+    16897667060435514221ULL, 12367869599571112440ULL, 3388831797050807508ULL,
+    16791449724090982798ULL, 2673426123453294928ULL, 11369313542384405846ULL,
+    15641960333586432634ULL, 15080962589658958379ULL, 7747943772340226569ULL,
+    8075023376199159152ULL, 8485093027378306528ULL, 13503706844122243648ULL, 8401961362938086226ULL,
+    8125426002124226402ULL, 9005399361407785203ULL, 6847968030066906634ULL, 11934937736309295197ULL,
+    5116750888594772351ULL, 2817039227179245227ULL, 17724206901239332980ULL, 4985702708254058578ULL,
+    5786345435756642871ULL, 17772527414940936938ULL, 1201320251272957006ULL,
+    15787430120324348129ULL, 6305488781359965661ULL, 12423900845502858433ULL,
+    17485949424202277720ULL, 2062237315546855852ULL, 10353639467860902375ULL,
+    2315398490451287299ULL, 15394572894814882621ULL, 232866113801165640ULL, 7413443736109338926ULL,
+    902719806551551191ULL, 16568853118619045174ULL, 14202214862428279177ULL,
+    11719595395278861192ULL, 5890053236389907647ULL, 9996196494965833627ULL,
+    12967056942364782577ULL, 9034128755157395787ULL, 17898204904710512655ULL,
+    8229373445062993977ULL, 13580036169519833644ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/internal/Hacl_SHA2_Types.h b/include/internal/Hacl_SHA2_Types.h
index 1e51a0f1..5a1eb668 100644
--- a/include/internal/Hacl_SHA2_Types.h
+++ b/include/internal/Hacl_SHA2_Types.h
@@ -35,68 +35,68 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_2p_s
+typedef struct Hacl_Hash_SHA2_uint8_2p_s
 {
   uint8_t *fst;
   uint8_t *snd;
 }
-Hacl_Impl_SHA2_Types_uint8_2p;
+Hacl_Hash_SHA2_uint8_2p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_3p_s
+typedef struct Hacl_Hash_SHA2_uint8_3p_s
 {
   uint8_t *fst;
-  Hacl_Impl_SHA2_Types_uint8_2p snd;
+  Hacl_Hash_SHA2_uint8_2p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_3p;
+Hacl_Hash_SHA2_uint8_3p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_4p_s
+typedef struct Hacl_Hash_SHA2_uint8_4p_s
 {
   uint8_t *fst;
-  Hacl_Impl_SHA2_Types_uint8_3p snd;
+  Hacl_Hash_SHA2_uint8_3p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_4p;
+Hacl_Hash_SHA2_uint8_4p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_5p_s
+typedef struct Hacl_Hash_SHA2_uint8_5p_s
 {
   uint8_t *fst;
-  Hacl_Impl_SHA2_Types_uint8_4p snd;
+  Hacl_Hash_SHA2_uint8_4p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_5p;
+Hacl_Hash_SHA2_uint8_5p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_6p_s
+typedef struct Hacl_Hash_SHA2_uint8_6p_s
 {
   uint8_t *fst;
-  Hacl_Impl_SHA2_Types_uint8_5p snd;
+  Hacl_Hash_SHA2_uint8_5p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_6p;
+Hacl_Hash_SHA2_uint8_6p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_7p_s
+typedef struct Hacl_Hash_SHA2_uint8_7p_s
 {
   uint8_t *fst;
-  Hacl_Impl_SHA2_Types_uint8_6p snd;
+  Hacl_Hash_SHA2_uint8_6p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_7p;
+Hacl_Hash_SHA2_uint8_7p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_8p_s
+typedef struct Hacl_Hash_SHA2_uint8_8p_s
 {
   uint8_t *fst;
-  Hacl_Impl_SHA2_Types_uint8_7p snd;
+  Hacl_Hash_SHA2_uint8_7p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_8p;
+Hacl_Hash_SHA2_uint8_8p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_2x4p_s
+typedef struct Hacl_Hash_SHA2_uint8_2x4p_s
 {
-  Hacl_Impl_SHA2_Types_uint8_4p fst;
-  Hacl_Impl_SHA2_Types_uint8_4p snd;
+  Hacl_Hash_SHA2_uint8_4p fst;
+  Hacl_Hash_SHA2_uint8_4p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_2x4p;
+Hacl_Hash_SHA2_uint8_2x4p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_2x8p_s
+typedef struct Hacl_Hash_SHA2_uint8_2x8p_s
 {
-  Hacl_Impl_SHA2_Types_uint8_8p fst;
-  Hacl_Impl_SHA2_Types_uint8_8p snd;
+  Hacl_Hash_SHA2_uint8_8p fst;
+  Hacl_Hash_SHA2_uint8_8p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_2x8p;
+Hacl_Hash_SHA2_uint8_2x8p;
 
 #if defined(__cplusplus)
 }
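
These nested pair types let the internal multi-buffer SHA-2 code thread 2 to 8 byte buffers through a single parameter: an N-tuple is a leading pointer plus an (N-1)-tuple. As an illustration only (this is an internal header; the include path, helper name, and buffer names below are assumptions), a 4-tuple can be built with C designated initializers:

#include "internal/Hacl_SHA2_Types.h"

/* Wrap four caller-owned buffers (b0..b3, hypothetical) into the nested
   4-tuple type consumed by the multi-buffer SHA-2 code paths. */
static Hacl_Hash_SHA2_uint8_4p
wrap4(uint8_t *b0, uint8_t *b1, uint8_t *b2, uint8_t *b3)
{
  Hacl_Hash_SHA2_uint8_4p r = {
    .fst = b0,
    .snd = { .fst = b1, .snd = { .fst = b2, .snd = b3 } }
  };
  return r;
}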
diff --git a/include/msvc/EverCrypt_Chacha20Poly1305.h b/include/msvc/EverCrypt_Chacha20Poly1305.h
index c3eb2655..bd59e48b 100644
--- a/include/msvc/EverCrypt_Chacha20Poly1305.h
+++ b/include/msvc/EverCrypt_Chacha20Poly1305.h
@@ -35,9 +35,9 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "Hacl_Chacha20Poly1305_32.h"
-#include "Hacl_Chacha20Poly1305_256.h"
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 #include "EverCrypt_AutoConfig2.h"
 
 void
diff --git a/include/msvc/EverCrypt_HMAC.h b/include/msvc/EverCrypt_HMAC.h
index 6c64a37f..7d1da14d 100644
--- a/include/msvc/EverCrypt_HMAC.h
+++ b/include/msvc/EverCrypt_HMAC.h
@@ -38,13 +38,14 @@ extern "C" {
 #include "Hacl_Streaming_Types.h"
 #include "Hacl_Krmllib.h"
 #include "Hacl_Hash_SHA2.h"
-#include "Hacl_Hash_Blake2.h"
+#include "Hacl_Hash_Blake2s.h"
+#include "Hacl_Hash_Blake2b.h"
 
 bool EverCrypt_HMAC_is_supported_alg(Spec_Hash_Definitions_hash_alg uu___);
 
 typedef Spec_Hash_Definitions_hash_alg EverCrypt_HMAC_supported_alg;
 
-extern void (*EverCrypt_HMAC_hash_256)(uint8_t *x0, uint32_t x1, uint8_t *x2);
+extern void (*EverCrypt_HMAC_hash_256)(uint8_t *x0, uint8_t *x1, uint32_t x2);
 
 void
 EverCrypt_HMAC_compute(
diff --git a/include/msvc/EverCrypt_Hash.h b/include/msvc/EverCrypt_Hash.h
index 6791dc27..b35dcf5f 100644
--- a/include/msvc/EverCrypt_Hash.h
+++ b/include/msvc/EverCrypt_Hash.h
@@ -39,9 +39,10 @@ extern "C" {
 #include "Hacl_Krmllib.h"
 #include "Hacl_Hash_SHA3.h"
 #include "Hacl_Hash_SHA2.h"
-#include "Hacl_Hash_Blake2s_128.h"
-#include "Hacl_Hash_Blake2b_256.h"
-#include "Hacl_Hash_Blake2.h"
+#include "Hacl_Hash_Blake2s_Simd128.h"
+#include "Hacl_Hash_Blake2s.h"
+#include "Hacl_Hash_Blake2b_Simd256.h"
+#include "Hacl_Hash_Blake2b.h"
 #include "EverCrypt_Error.h"
 #include "EverCrypt_AutoConfig2.h"
 
@@ -49,13 +50,13 @@ typedef struct EverCrypt_Hash_state_s_s EverCrypt_Hash_state_s;
 
 uint32_t EverCrypt_Hash_Incremental_hash_len(Spec_Hash_Definitions_hash_alg a);
 
-typedef struct EverCrypt_Hash_Incremental_hash_state_s
+typedef struct EverCrypt_Hash_Incremental_state_t_s
 {
   EverCrypt_Hash_state_s *block_state;
   uint8_t *buf;
   uint64_t total_len;
 }
-EverCrypt_Hash_Incremental_hash_state;
+EverCrypt_Hash_Incremental_state_t;
 
 /**
 Allocate initial state for the agile hash. The argument `a` stands for the
@@ -63,13 +64,13 @@ choice of algorithm (see Hacl_Spec.h). This API will automatically pick the most
 efficient implementation, provided you have called EverCrypt_AutoConfig2_init()
 before. The state is to be freed by calling `free`.
 */
-EverCrypt_Hash_Incremental_hash_state
-*EverCrypt_Hash_Incremental_create_in(Spec_Hash_Definitions_hash_alg a);
+EverCrypt_Hash_Incremental_state_t
+*EverCrypt_Hash_Incremental_malloc(Spec_Hash_Definitions_hash_alg a);
 
 /**
 Reset an existing state to the initial hash state with empty data.
 */
-void EverCrypt_Hash_Incremental_init(EverCrypt_Hash_Incremental_hash_state *s);
+void EverCrypt_Hash_Incremental_reset(EverCrypt_Hash_Incremental_state_t *state);
 
 /**
 Feed an arbitrary amount of data into the hash. This function returns
@@ -80,34 +81,35 @@ algorithm. Both limits are unlikely to be attained in practice.
 */
 EverCrypt_Error_error_code
 EverCrypt_Hash_Incremental_update(
-  EverCrypt_Hash_Incremental_hash_state *s,
-  uint8_t *data,
-  uint32_t len
+  EverCrypt_Hash_Incremental_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
 );
 
 /**
 Perform a run-time test to determine which algorithm was chosen for the given piece of state.
 */
 Spec_Hash_Definitions_hash_alg
-EverCrypt_Hash_Incremental_alg_of_state(EverCrypt_Hash_Incremental_hash_state *s);
+EverCrypt_Hash_Incremental_alg_of_state(EverCrypt_Hash_Incremental_state_t *s);
 
 /**
-Write the resulting hash into `dst`, an array whose length is
+Write the resulting hash into `output`, an array whose length is
 algorithm-specific. You can use the macros defined earlier in this file to
 allocate a destination buffer of the right length. The state remains valid after
-a call to `finish`, meaning the user may feed more data into the hash via
+a call to `digest`, meaning the user may feed more data into the hash via
 `update`. (The finish function operates on an internal copy of the state and
 therefore does not invalidate the client-held state.)
 */
-void EverCrypt_Hash_Incremental_finish(EverCrypt_Hash_Incremental_hash_state *s, uint8_t *dst);
+void
+EverCrypt_Hash_Incremental_digest(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output);
 
 /**
 Free a state previously allocated with `create_in`.
 */
-void EverCrypt_Hash_Incremental_free(EverCrypt_Hash_Incremental_hash_state *s);
+void EverCrypt_Hash_Incremental_free(EverCrypt_Hash_Incremental_state_t *state);
 
 /**
-Hash `input`, of len `len`, into `dst`, an array whose length is determined by
+Hash `input`, of length `input_len`, into `output`, an array whose length is determined by
 your choice of algorithm `a` (see Hacl_Spec.h). You can use the macros defined
 earlier in this file to allocate a destination buffer of the right length. This
 API will automatically pick the most efficient implementation, provided you have
@@ -116,34 +118,34 @@ called EverCrypt_AutoConfig2_init() before.
 void
 EverCrypt_Hash_Incremental_hash(
   Spec_Hash_Definitions_hash_alg a,
-  uint8_t *dst,
+  uint8_t *output,
   uint8_t *input,
-  uint32_t len
+  uint32_t input_len
 );
 
-#define MD5_HASH_LEN ((uint32_t)16U)
+#define MD5_HASH_LEN (16U)
 
-#define SHA1_HASH_LEN ((uint32_t)20U)
+#define SHA1_HASH_LEN (20U)
 
-#define SHA2_224_HASH_LEN ((uint32_t)28U)
+#define SHA2_224_HASH_LEN (28U)
 
-#define SHA2_256_HASH_LEN ((uint32_t)32U)
+#define SHA2_256_HASH_LEN (32U)
 
-#define SHA2_384_HASH_LEN ((uint32_t)48U)
+#define SHA2_384_HASH_LEN (48U)
 
-#define SHA2_512_HASH_LEN ((uint32_t)64U)
+#define SHA2_512_HASH_LEN (64U)
 
-#define SHA3_224_HASH_LEN ((uint32_t)28U)
+#define SHA3_224_HASH_LEN (28U)
 
-#define SHA3_256_HASH_LEN ((uint32_t)32U)
+#define SHA3_256_HASH_LEN (32U)
 
-#define SHA3_384_HASH_LEN ((uint32_t)48U)
+#define SHA3_384_HASH_LEN (48U)
 
-#define SHA3_512_HASH_LEN ((uint32_t)64U)
+#define SHA3_512_HASH_LEN (64U)
 
-#define BLAKE2S_HASH_LEN ((uint32_t)32U)
+#define BLAKE2S_HASH_LEN (32U)
 
-#define BLAKE2B_HASH_LEN ((uint32_t)64U)
+#define BLAKE2B_HASH_LEN (64U)
 
 #if defined(__cplusplus)
 }
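
Taken together, the renamed incremental API reads as malloc / reset / update / digest / free. A usage sketch, assuming Spec_Hash_Definitions_SHA2_256 from Hacl_Spec.h and ignoring the error code returned by update for brevity (helper name is hypothetical):

#include "EverCrypt_Hash.h"

/* Incrementally hash one message with the agile SHA2-256 implementation. */
void sha256_incremental(uint8_t *msg, uint32_t msg_len, uint8_t digest[SHA2_256_HASH_LEN])
{
  EverCrypt_AutoConfig2_init();  /* let EverCrypt pick the best implementation */
  EverCrypt_Hash_Incremental_state_t *st =
    EverCrypt_Hash_Incremental_malloc(Spec_Hash_Definitions_SHA2_256);
  EverCrypt_Hash_Incremental_update(st, msg, msg_len);  /* returns an EverCrypt_Error_error_code, ignored here */
  EverCrypt_Hash_Incremental_digest(st, digest);        /* the state stays valid afterwards */
  EverCrypt_Hash_Incremental_free(st);
}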
diff --git a/include/msvc/EverCrypt_Poly1305.h b/include/msvc/EverCrypt_Poly1305.h
index 62c00764..fba04059 100644
--- a/include/msvc/EverCrypt_Poly1305.h
+++ b/include/msvc/EverCrypt_Poly1305.h
@@ -35,12 +35,12 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "Hacl_Poly1305_32.h"
-#include "Hacl_Poly1305_256.h"
-#include "Hacl_Poly1305_128.h"
+#include "Hacl_MAC_Poly1305_Simd256.h"
+#include "Hacl_MAC_Poly1305_Simd128.h"
+#include "Hacl_MAC_Poly1305.h"
 #include "EverCrypt_AutoConfig2.h"
 
-void EverCrypt_Poly1305_poly1305(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key);
+void EverCrypt_Poly1305_mac(uint8_t *output, uint8_t *input, uint32_t input_len, uint8_t *key);
 
 #if defined(__cplusplus)
 }
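
The same output-first convention now applies to the one-shot Poly1305 wrapper. A small sketch (helper and buffer names are hypothetical; Poly1305 uses a 32-byte one-time key and a 16-byte tag):

#include "EverCrypt_Poly1305.h"

/* Compute a Poly1305 tag over msg using a 32-byte one-time key. */
void poly1305_tag(uint8_t tag[16], uint8_t *key /* 32 bytes */, uint8_t *msg, uint32_t msg_len)
{
  EverCrypt_Poly1305_mac(tag, msg, msg_len, key);
}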
diff --git a/include/Hacl_Chacha20Poly1305_32.h b/include/msvc/Hacl_AEAD_Chacha20Poly1305.h
similarity index 67%
rename from include/Hacl_Chacha20Poly1305_32.h
rename to include/msvc/Hacl_AEAD_Chacha20Poly1305.h
index 624e29fb..d20f0554 100644
--- a/include/Hacl_Chacha20Poly1305_32.h
+++ b/include/msvc/Hacl_AEAD_Chacha20Poly1305.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Chacha20Poly1305_32_H
-#define __Hacl_Chacha20Poly1305_32_H
+#ifndef __Hacl_AEAD_Chacha20Poly1305_H
+#define __Hacl_AEAD_Chacha20Poly1305_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,35 +35,33 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "Hacl_Poly1305_32.h"
 #include "Hacl_Chacha20.h"
 
 /**
-Encrypt a message `m` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
-
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the message.
-@param m Pointer to `mlen` bytes of memory where the message is read from.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is written to.
-@param mac Pointer to 16 bytes of memory where the mac is written to.
+Encrypt a message `input` with key `key`.
+
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
+
+@param output Pointer to `input_len` bytes of memory where the ciphertext is written to.
+@param tag Pointer to 16 bytes of memory where the mac is written to.
+@param input Pointer to `input_len` bytes of memory where the message is read from.
+@param input_len Length of the message.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
 */
 void
-Hacl_Chacha20Poly1305_32_aead_encrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_encrypt(
+  uint8_t *output,
+  uint8_t *tag,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce
 );
 
 /**
@@ -88,20 +86,20 @@ If decryption fails, the array `m` remains unchanged and the function returns th
 @returns 0 on success; 1 on failure.
 */
 uint32_t
-Hacl_Chacha20Poly1305_32_aead_decrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_decrypt(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce,
+  uint8_t *tag
 );
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Chacha20Poly1305_32_H_DEFINED
+#define __Hacl_AEAD_Chacha20Poly1305_H_DEFINED
 #endif
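
For reference, a round-trip sketch against the renamed portable AEAD entry points; the helper and buffer names are hypothetical, and sizes follow the doc comments above (32-byte key, 12-byte nonce, 16-byte tag):

#include "Hacl_AEAD_Chacha20Poly1305.h"

/* Encrypt msg into cipher, then authenticate-and-decrypt back into msg.
   Returns 0 on success, 1 if the tag check fails. */
uint32_t
chacha20poly1305_roundtrip(
  uint8_t key[32], uint8_t nonce[12],
  uint8_t *msg, uint32_t msg_len,
  uint8_t *aad, uint32_t aad_len,
  uint8_t *cipher /* msg_len bytes */
)
{
  uint8_t tag[16];
  Hacl_AEAD_Chacha20Poly1305_encrypt(cipher, tag, msg, msg_len, aad, aad_len, key, nonce);
  return Hacl_AEAD_Chacha20Poly1305_decrypt(msg, cipher, msg_len, aad, aad_len, key, nonce, tag);
}

The Simd128 and Simd256 headers added below expose the same call shape under the Hacl_AEAD_Chacha20Poly1305_Simd128_* and Hacl_AEAD_Chacha20Poly1305_Simd256_* prefixes.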
diff --git a/include/msvc/Hacl_AEAD_Chacha20Poly1305_Simd128.h b/include/msvc/Hacl_AEAD_Chacha20Poly1305_Simd128.h
new file mode 100644
index 00000000..de26c907
--- /dev/null
+++ b/include/msvc/Hacl_AEAD_Chacha20Poly1305_Simd128.h
@@ -0,0 +1,104 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_AEAD_Chacha20Poly1305_Simd128_H
+#define __Hacl_AEAD_Chacha20Poly1305_Simd128_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Chacha20_Vec128.h"
+
+/**
+Encrypt a message `input` with key `key`.
+
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
+
+@param output Pointer to `input_len` bytes of memory where the ciphertext is written to.
+@param tag Pointer to 16 bytes of memory where the mac is written to.
+@param input Pointer to `input_len` bytes of memory where the message is read from.
+@param input_len Length of the message.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
+*/
+void
+Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(
+  uint8_t *output,
+  uint8_t *tag,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce
+);
+
+/**
+Decrypt a ciphertext `input` with key `key`.
+
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
+
+If decryption succeeds, the resulting plaintext is stored in `output` and the function returns the success code 0.
+If decryption fails, the array `output` remains unchanged and the function returns the error code 1.
+
+@param output Pointer to `input_len` bytes of memory where the message is written to.
+@param input Pointer to `input_len` bytes of memory where the ciphertext is read from.
+@param input_len Length of the ciphertext.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
+@param tag Pointer to 16 bytes of memory where the mac is read from.
+
+@returns 0 on success; 1 on failure.
+*/
+uint32_t
+Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce,
+  uint8_t *tag
+);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_AEAD_Chacha20Poly1305_Simd128_H_DEFINED
+#endif
diff --git a/include/msvc/Hacl_AEAD_Chacha20Poly1305_Simd256.h b/include/msvc/Hacl_AEAD_Chacha20Poly1305_Simd256.h
new file mode 100644
index 00000000..0abcdc59
--- /dev/null
+++ b/include/msvc/Hacl_AEAD_Chacha20Poly1305_Simd256.h
@@ -0,0 +1,104 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_AEAD_Chacha20Poly1305_Simd256_H
+#define __Hacl_AEAD_Chacha20Poly1305_Simd256_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Chacha20_Vec256.h"
+
+/**
+Encrypt a message `input` with key `key`.
+
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
+
+@param output Pointer to `input_len` bytes of memory where the ciphertext is written to.
+@param tag Pointer to 16 bytes of memory where the mac is written to.
+@param input Pointer to `input_len` bytes of memory where the message is read from.
+@param input_len Length of the message.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
+*/
+void
+Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(
+  uint8_t *output,
+  uint8_t *tag,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce
+);
+
+/**
+Decrypt a ciphertext `input` with key `key`.
+
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
+
+If decryption succeeds, the resulting plaintext is stored in `output` and the function returns the success code 0.
+If decryption fails, the array `output` remains unchanged and the function returns the error code 1.
+
+@param output Pointer to `input_len` bytes of memory where the message is written to.
+@param input Pointer to `input_len` bytes of memory where the ciphertext is read from.
+@param input_len Length of the ciphertext.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
+@param tag Pointer to 16 bytes of memory where the mac is read from.
+
+@returns 0 on success; 1 on failure.
+*/
+uint32_t
+Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce,
+  uint8_t *tag
+);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_AEAD_Chacha20Poly1305_Simd256_H_DEFINED
+#endif
diff --git a/include/msvc/Hacl_Chacha20Poly1305_128.h b/include/msvc/Hacl_Chacha20Poly1305_128.h
deleted file mode 100644
index 630fab93..00000000
--- a/include/msvc/Hacl_Chacha20Poly1305_128.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Chacha20Poly1305_128_H
-#define __Hacl_Chacha20Poly1305_128_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-#include "Hacl_Poly1305_128.h"
-#include "Hacl_Chacha20_Vec128.h"
-
-/**
-Encrypt a message `m` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
-
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the message.
-@param m Pointer to `mlen` bytes of memory where the message is read from.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is written to.
-@param mac Pointer to 16 bytes of memory where the mac is written to.
-*/
-void
-Hacl_Chacha20Poly1305_128_aead_encrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
-);
-
-/**
-Decrypt a ciphertext `cipher` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
-
-If decryption succeeds, the resulting plaintext is stored in `m` and the function returns the success code 0.
-If decryption fails, the array `m` remains unchanged and the function returns the error code 1.
-
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the ciphertext.
-@param m Pointer to `mlen` bytes of memory where the message is written to.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is read from.
-@param mac Pointer to 16 bytes of memory where the mac is read from.
-
-@returns 0 on succeess; 1 on failure.
-*/
-uint32_t
-Hacl_Chacha20Poly1305_128_aead_decrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Chacha20Poly1305_128_H_DEFINED
-#endif
diff --git a/include/msvc/Hacl_Chacha20Poly1305_256.h b/include/msvc/Hacl_Chacha20Poly1305_256.h
deleted file mode 100644
index ff0f2e60..00000000
--- a/include/msvc/Hacl_Chacha20Poly1305_256.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Chacha20Poly1305_256_H
-#define __Hacl_Chacha20Poly1305_256_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-#include "Hacl_Poly1305_256.h"
-#include "Hacl_Chacha20_Vec256.h"
-
-/**
-Encrypt a message `m` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
-
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the message.
-@param m Pointer to `mlen` bytes of memory where the message is read from.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is written to.
-@param mac Pointer to 16 bytes of memory where the mac is written to.
-*/
-void
-Hacl_Chacha20Poly1305_256_aead_encrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
-);
-
-/**
-Decrypt a ciphertext `cipher` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
-
-If decryption succeeds, the resulting plaintext is stored in `m` and the function returns the success code 0.
-If decryption fails, the array `m` remains unchanged and the function returns the error code 1.
-
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the ciphertext.
-@param m Pointer to `mlen` bytes of memory where the message is written to.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is read from.
-@param mac Pointer to 16 bytes of memory where the mac is read from.
-
-@returns 0 on succeess; 1 on failure.
-*/
-uint32_t
-Hacl_Chacha20Poly1305_256_aead_decrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
-);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Chacha20Poly1305_256_H_DEFINED
-#endif
diff --git a/include/msvc/Hacl_HMAC.h b/include/msvc/Hacl_HMAC.h
index 84dbedf5..e1dc04f2 100644
--- a/include/msvc/Hacl_HMAC.h
+++ b/include/msvc/Hacl_HMAC.h
@@ -37,7 +37,8 @@ extern "C" {
 
 #include "Hacl_Krmllib.h"
 #include "Hacl_Hash_SHA2.h"
-#include "Hacl_Hash_Blake2.h"
+#include "Hacl_Hash_Blake2s.h"
+#include "Hacl_Hash_Blake2b.h"
 
 /**
 Write the HMAC-SHA-1 MAC of a message (`data`) by using a key (`key`) into `dst`.
@@ -46,7 +47,7 @@ The key can be any length and will be hashed if it is longer and padded if it is
 `dst` must point to 20 bytes of memory.
 */
 void
-Hacl_HMAC_legacy_compute_sha1(
+Hacl_HMAC_compute_sha1(
   uint8_t *dst,
   uint8_t *key,
   uint32_t key_len,
diff --git a/include/msvc/Hacl_HMAC_Blake2b_256.h b/include/msvc/Hacl_HMAC_Blake2b_256.h
index e94ba05f..d8f3e9e1 100644
--- a/include/msvc/Hacl_HMAC_Blake2b_256.h
+++ b/include/msvc/Hacl_HMAC_Blake2b_256.h
@@ -36,7 +36,7 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Krmllib.h"
-#include "Hacl_Hash_Blake2b_256.h"
+#include "Hacl_Hash_Blake2b_Simd256.h"
 
 /**
 Write the HMAC-BLAKE2b MAC of a message (`data`) by using a key (`key`) into `dst`.
diff --git a/include/msvc/Hacl_HMAC_Blake2s_128.h b/include/msvc/Hacl_HMAC_Blake2s_128.h
index 7f20343e..5ff79038 100644
--- a/include/msvc/Hacl_HMAC_Blake2s_128.h
+++ b/include/msvc/Hacl_HMAC_Blake2s_128.h
@@ -35,7 +35,7 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "Hacl_Hash_Blake2s_128.h"
+#include "Hacl_Hash_Blake2s_Simd128.h"
 
 /**
 Write the HMAC-BLAKE2s MAC of a message (`data`) by using a key (`key`) into `dst`.
diff --git a/include/msvc/Hacl_HPKE_Curve51_CP128_SHA256.h b/include/msvc/Hacl_HPKE_Curve51_CP128_SHA256.h
index a768df6b..a46db470 100644
--- a/include/msvc/Hacl_HPKE_Curve51_CP128_SHA256.h
+++ b/include/msvc/Hacl_HPKE_Curve51_CP128_SHA256.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_51.h"
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
 
 uint32_t
 Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_Curve51_CP128_SHA512.h b/include/msvc/Hacl_HPKE_Curve51_CP128_SHA512.h
index a4388707..89091754 100644
--- a/include/msvc/Hacl_HPKE_Curve51_CP128_SHA512.h
+++ b/include/msvc/Hacl_HPKE_Curve51_CP128_SHA512.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_51.h"
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
 
 uint32_t
 Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_Curve51_CP256_SHA256.h b/include/msvc/Hacl_HPKE_Curve51_CP256_SHA256.h
index 37b26f6a..83ba2adb 100644
--- a/include/msvc/Hacl_HPKE_Curve51_CP256_SHA256.h
+++ b/include/msvc/Hacl_HPKE_Curve51_CP256_SHA256.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_51.h"
-#include "Hacl_Chacha20Poly1305_256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
 
 uint32_t
 Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_Curve51_CP256_SHA512.h b/include/msvc/Hacl_HPKE_Curve51_CP256_SHA512.h
index f7240a95..1a796ab7 100644
--- a/include/msvc/Hacl_HPKE_Curve51_CP256_SHA512.h
+++ b/include/msvc/Hacl_HPKE_Curve51_CP256_SHA512.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_51.h"
-#include "Hacl_Chacha20Poly1305_256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
 
 uint32_t
 Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_Curve51_CP32_SHA256.h b/include/msvc/Hacl_HPKE_Curve51_CP32_SHA256.h
index e48242e6..d249ba05 100644
--- a/include/msvc/Hacl_HPKE_Curve51_CP32_SHA256.h
+++ b/include/msvc/Hacl_HPKE_Curve51_CP32_SHA256.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_51.h"
-#include "Hacl_Chacha20Poly1305_32.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 
 uint32_t
 Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_Curve51_CP32_SHA512.h b/include/msvc/Hacl_HPKE_Curve51_CP32_SHA512.h
index 057f8769..ddc00da3 100644
--- a/include/msvc/Hacl_HPKE_Curve51_CP32_SHA512.h
+++ b/include/msvc/Hacl_HPKE_Curve51_CP32_SHA512.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_51.h"
-#include "Hacl_Chacha20Poly1305_32.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 
 uint32_t
 Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_Curve64_CP128_SHA256.h b/include/msvc/Hacl_HPKE_Curve64_CP128_SHA256.h
index 1694a123..fda63e52 100644
--- a/include/msvc/Hacl_HPKE_Curve64_CP128_SHA256.h
+++ b/include/msvc/Hacl_HPKE_Curve64_CP128_SHA256.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_64.h"
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
 
 uint32_t
 Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_Curve64_CP128_SHA512.h b/include/msvc/Hacl_HPKE_Curve64_CP128_SHA512.h
index 23f52f25..c8b06ca8 100644
--- a/include/msvc/Hacl_HPKE_Curve64_CP128_SHA512.h
+++ b/include/msvc/Hacl_HPKE_Curve64_CP128_SHA512.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_64.h"
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
 
 uint32_t
 Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_Curve64_CP256_SHA256.h b/include/msvc/Hacl_HPKE_Curve64_CP256_SHA256.h
index 33d471bc..2da8dbcf 100644
--- a/include/msvc/Hacl_HPKE_Curve64_CP256_SHA256.h
+++ b/include/msvc/Hacl_HPKE_Curve64_CP256_SHA256.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_64.h"
-#include "Hacl_Chacha20Poly1305_256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
 
 uint32_t
 Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_Curve64_CP256_SHA512.h b/include/msvc/Hacl_HPKE_Curve64_CP256_SHA512.h
index d59c1ee4..87d919e1 100644
--- a/include/msvc/Hacl_HPKE_Curve64_CP256_SHA512.h
+++ b/include/msvc/Hacl_HPKE_Curve64_CP256_SHA512.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_64.h"
-#include "Hacl_Chacha20Poly1305_256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
 
 uint32_t
 Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_Curve64_CP32_SHA256.h b/include/msvc/Hacl_HPKE_Curve64_CP32_SHA256.h
index 5aaa07e1..bd4b9b59 100644
--- a/include/msvc/Hacl_HPKE_Curve64_CP32_SHA256.h
+++ b/include/msvc/Hacl_HPKE_Curve64_CP32_SHA256.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_64.h"
-#include "Hacl_Chacha20Poly1305_32.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 
 uint32_t
 Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_Curve64_CP32_SHA512.h b/include/msvc/Hacl_HPKE_Curve64_CP32_SHA512.h
index 594000f2..0d2bb8f0 100644
--- a/include/msvc/Hacl_HPKE_Curve64_CP32_SHA512.h
+++ b/include/msvc/Hacl_HPKE_Curve64_CP32_SHA512.h
@@ -38,7 +38,7 @@ extern "C" {
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
 #include "Hacl_Curve25519_64.h"
-#include "Hacl_Chacha20Poly1305_32.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 
 uint32_t
 Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_P256_CP128_SHA256.h b/include/msvc/Hacl_HPKE_P256_CP128_SHA256.h
index 613fef83..c76a100d 100644
--- a/include/msvc/Hacl_HPKE_P256_CP128_SHA256.h
+++ b/include/msvc/Hacl_HPKE_P256_CP128_SHA256.h
@@ -37,7 +37,7 @@ extern "C" {
 
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
 
 uint32_t
 Hacl_HPKE_P256_CP128_SHA256_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_P256_CP256_SHA256.h b/include/msvc/Hacl_HPKE_P256_CP256_SHA256.h
index 6e74b1db..4a33eb8a 100644
--- a/include/msvc/Hacl_HPKE_P256_CP256_SHA256.h
+++ b/include/msvc/Hacl_HPKE_P256_CP256_SHA256.h
@@ -37,7 +37,7 @@ extern "C" {
 
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
-#include "Hacl_Chacha20Poly1305_256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
 
 uint32_t
 Hacl_HPKE_P256_CP256_SHA256_setupBaseS(
diff --git a/include/msvc/Hacl_HPKE_P256_CP32_SHA256.h b/include/msvc/Hacl_HPKE_P256_CP32_SHA256.h
index 1f8679d4..2818abed 100644
--- a/include/msvc/Hacl_HPKE_P256_CP32_SHA256.h
+++ b/include/msvc/Hacl_HPKE_P256_CP32_SHA256.h
@@ -37,7 +37,7 @@ extern "C" {
 
 #include "Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE.h"
 #include "Hacl_HKDF.h"
-#include "Hacl_Chacha20Poly1305_32.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 
 uint32_t
 Hacl_HPKE_P256_CP32_SHA256_setupBaseS(
diff --git a/include/msvc/Hacl_Hash_Blake2.h b/include/msvc/Hacl_Hash_Blake2.h
deleted file mode 100644
index 3ee29015..00000000
--- a/include/msvc/Hacl_Hash_Blake2.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Hash_Blake2_H
-#define __Hacl_Hash_Blake2_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-#include "Hacl_Krmllib.h"
-
-void Hacl_Blake2b_32_blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn);
-
-void
-Hacl_Blake2b_32_blake2b_update_key(
-  uint64_t *wv,
-  uint64_t *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-);
-
-void
-Hacl_Blake2b_32_blake2b_update_multi(
-  uint32_t len,
-  uint64_t *wv,
-  uint64_t *hash,
-  FStar_UInt128_uint128 prev,
-  uint8_t *blocks,
-  uint32_t nb
-);
-
-void
-Hacl_Blake2b_32_blake2b_update_last(
-  uint32_t len,
-  uint64_t *wv,
-  uint64_t *hash,
-  FStar_UInt128_uint128 prev,
-  uint32_t rem,
-  uint8_t *d
-);
-
-void Hacl_Blake2b_32_blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash);
-
-/**
-Write the BLAKE2b digest of message `d` using key `k` into `output`.
-
-@param nn Length of the to-be-generated digest with 1 <= `nn` <= 64.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2b_32_blake2b(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-);
-
-uint64_t *Hacl_Blake2b_32_blake2b_malloc(void);
-
-void Hacl_Blake2s_32_blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn);
-
-void
-Hacl_Blake2s_32_blake2s_update_key(
-  uint32_t *wv,
-  uint32_t *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-);
-
-void
-Hacl_Blake2s_32_blake2s_update_multi(
-  uint32_t len,
-  uint32_t *wv,
-  uint32_t *hash,
-  uint64_t prev,
-  uint8_t *blocks,
-  uint32_t nb
-);
-
-void
-Hacl_Blake2s_32_blake2s_update_last(
-  uint32_t len,
-  uint32_t *wv,
-  uint32_t *hash,
-  uint64_t prev,
-  uint32_t rem,
-  uint8_t *d
-);
-
-void Hacl_Blake2s_32_blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash);
-
-/**
-Write the BLAKE2s digest of message `d` using key `k` into `output`.
-
-@param nn Length of to-be-generated digest with 1 <= `nn` <= 32.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2s_32_blake2s(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-);
-
-uint32_t *Hacl_Blake2s_32_blake2s_malloc(void);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Hash_Blake2_H_DEFINED
-#endif
diff --git a/include/msvc/Hacl_Streaming_Blake2b_256.h b/include/msvc/Hacl_Hash_Blake2b.h
similarity index 56%
rename from include/msvc/Hacl_Streaming_Blake2b_256.h
rename to include/msvc/Hacl_Hash_Blake2b.h
index 20e42d7c..414574f9 100644
--- a/include/msvc/Hacl_Streaming_Blake2b_256.h
+++ b/include/msvc/Hacl_Hash_Blake2b.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Streaming_Blake2b_256_H
-#define __Hacl_Streaming_Blake2b_256_H
+#ifndef __Hacl_Hash_Blake2b_H
+#define __Hacl_Hash_Blake2b_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -37,67 +37,71 @@ extern "C" {
 
 #include "Hacl_Streaming_Types.h"
 #include "Hacl_Krmllib.h"
-#include "Hacl_Hash_Blake2b_256.h"
 
-typedef struct Hacl_Streaming_Blake2b_256_blake2b_256_block_state_s
+typedef struct Hacl_Hash_Blake2b_block_state_t_s
 {
-  Lib_IntVector_Intrinsics_vec256 *fst;
-  Lib_IntVector_Intrinsics_vec256 *snd;
+  uint64_t *fst;
+  uint64_t *snd;
 }
-Hacl_Streaming_Blake2b_256_blake2b_256_block_state;
+Hacl_Hash_Blake2b_block_state_t;
 
-typedef struct Hacl_Streaming_Blake2b_256_blake2b_256_state_s
+typedef struct Hacl_Hash_Blake2b_state_t_s
 {
-  Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state;
+  Hacl_Hash_Blake2b_block_state_t block_state;
   uint8_t *buf;
   uint64_t total_len;
 }
-Hacl_Streaming_Blake2b_256_blake2b_256_state;
+Hacl_Hash_Blake2b_state_t;
 
 /**
   State allocation function when there is no key
 */
-Hacl_Streaming_Blake2b_256_blake2b_256_state
-*Hacl_Streaming_Blake2b_256_blake2b_256_no_key_create_in(void);
+Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void);
 
 /**
-  (Re-)initialization function when there is no key
+  Re-initialization function when there is no key
 */
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_init(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *s
-);
+void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *state);
 
 /**
   Update function when there is no key; 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *p,
-  uint8_t *data,
-  uint32_t len
-);
+Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint32_t chunk_len);
 
 /**
   Finish function when there is no key
 */
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *p,
-  uint8_t *dst
-);
+void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output);
 
 /**
   Free state function when there is no key
 */
+void Hacl_Hash_Blake2b_free(Hacl_Hash_Blake2b_state_t *state);
+
+/**
+Write the BLAKE2b digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 64.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
 void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_free(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *s
+Hacl_Hash_Blake2b_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
 );
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Streaming_Blake2b_256_H_DEFINED
+#define __Hacl_Hash_Blake2b_H_DEFINED
 #endif
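
A sketch of the new one-shot, optionally keyed entry point; here a keyed call producing a 32-byte digest, with sizes taken from the doc comment above (1 <= output_len <= 64, key_len may be 0). The helper and buffer names are hypothetical:

#include "Hacl_Hash_Blake2b.h"

/* Keyed BLAKE2b: 32-byte key, 32-byte digest. */
void blake2b_keyed_32(uint8_t key[32], uint8_t *msg, uint32_t msg_len, uint8_t out[32])
{
  Hacl_Hash_Blake2b_hash_with_key(out, 32U, msg, msg_len, key, 32U);
}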
diff --git a/include/msvc/Hacl_Hash_Blake2b_Simd256.h b/include/msvc/Hacl_Hash_Blake2b_Simd256.h
new file mode 100644
index 00000000..adddce66
--- /dev/null
+++ b/include/msvc/Hacl_Hash_Blake2b_Simd256.h
@@ -0,0 +1,113 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_Hash_Blake2b_Simd256_H
+#define __Hacl_Hash_Blake2b_Simd256_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Streaming_Types.h"
+#include "Hacl_Krmllib.h"
+#include "libintvector.h"
+
+typedef struct Hacl_Hash_Blake2b_Simd256_block_state_t_s
+{
+  Lib_IntVector_Intrinsics_vec256 *fst;
+  Lib_IntVector_Intrinsics_vec256 *snd;
+}
+Hacl_Hash_Blake2b_Simd256_block_state_t;
+
+typedef struct Hacl_Hash_Blake2b_Simd256_state_t_s
+{
+  Hacl_Hash_Blake2b_Simd256_block_state_t block_state;
+  uint8_t *buf;
+  uint64_t total_len;
+}
+Hacl_Hash_Blake2b_Simd256_state_t;
+
+/**
+  State allocation function when there is no key
+*/
+Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void);
+
+/**
+  Re-initialization function when there is no key
+*/
+void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *state);
+
+/**
+  Update function when there is no key; 0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Hash_Blake2b_Simd256_update(
+  Hacl_Hash_Blake2b_Simd256_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
+);
+
+/**
+  Finish function when there is no key
+*/
+void
+Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8_t *output);
+
+/**
+  Free state function when there is no key
+*/
+void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state);
+
+/**
+Write the BLAKE2b digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 64.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
+void
+Hacl_Hash_Blake2b_Simd256_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
+);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_Hash_Blake2b_Simd256_H_DEFINED
+#endif
diff --git a/include/msvc/Hacl_Streaming_Blake2s_128.h b/include/msvc/Hacl_Hash_Blake2s.h
similarity index 56%
rename from include/msvc/Hacl_Streaming_Blake2s_128.h
rename to include/msvc/Hacl_Hash_Blake2s.h
index 60e209ff..2c0d7c5b 100644
--- a/include/msvc/Hacl_Streaming_Blake2s_128.h
+++ b/include/msvc/Hacl_Hash_Blake2s.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Streaming_Blake2s_128_H
-#define __Hacl_Streaming_Blake2s_128_H
+#ifndef __Hacl_Hash_Blake2s_H
+#define __Hacl_Hash_Blake2s_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -36,67 +36,71 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Streaming_Types.h"
-#include "Hacl_Hash_Blake2s_128.h"
 
-typedef struct Hacl_Streaming_Blake2s_128_blake2s_128_block_state_s
+typedef struct Hacl_Hash_Blake2s_block_state_t_s
 {
-  Lib_IntVector_Intrinsics_vec128 *fst;
-  Lib_IntVector_Intrinsics_vec128 *snd;
+  uint32_t *fst;
+  uint32_t *snd;
 }
-Hacl_Streaming_Blake2s_128_blake2s_128_block_state;
+Hacl_Hash_Blake2s_block_state_t;
 
-typedef struct Hacl_Streaming_Blake2s_128_blake2s_128_state_s
+typedef struct Hacl_Hash_Blake2s_state_t_s
 {
-  Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state;
+  Hacl_Hash_Blake2s_block_state_t block_state;
   uint8_t *buf;
   uint64_t total_len;
 }
-Hacl_Streaming_Blake2s_128_blake2s_128_state;
+Hacl_Hash_Blake2s_state_t;
 
 /**
   State allocation function when there is no key
 */
-Hacl_Streaming_Blake2s_128_blake2s_128_state
-*Hacl_Streaming_Blake2s_128_blake2s_128_no_key_create_in(void);
+Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void);
 
 /**
-  (Re-)initialization function when there is no key
+  Re-initialization function when there is no key
 */
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_init(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *s
-);
+void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *state);
 
 /**
   Update function when there is no key; 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *p,
-  uint8_t *data,
-  uint32_t len
-);
+Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint32_t chunk_len);
 
 /**
   Finish function when there is no key
 */
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_finish(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *p,
-  uint8_t *dst
-);
+void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output);
 
 /**
   Free state function when there is no key
 */
+void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state);
+
+/**
+Write the BLAKE2s digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
 void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_free(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *s
+Hacl_Hash_Blake2s_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
 );
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Streaming_Blake2s_128_H_DEFINED
+#define __Hacl_Hash_Blake2s_H_DEFINED
 #endif
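
The renamed BLAKE2s streaming API mirrors the BLAKE2b one. A sketch feeding two chunks through an unkeyed state and emitting the digest, assumed here to be the full 32 bytes (helper and buffer names are hypothetical):

#include "Hacl_Hash_Blake2s.h"

/* Stream two chunks through an unkeyed BLAKE2s state. */
void blake2s_stream(uint8_t *c0, uint32_t c0_len, uint8_t *c1, uint32_t c1_len, uint8_t digest[32])
{
  Hacl_Hash_Blake2s_state_t *st = Hacl_Hash_Blake2s_malloc();
  Hacl_Hash_Blake2s_update(st, c0, c0_len);  /* 0 = success, 1 = max length exceeded */
  Hacl_Hash_Blake2s_update(st, c1, c1_len);
  Hacl_Hash_Blake2s_digest(st, digest);      /* assumed: full 32-byte unkeyed digest */
  Hacl_Hash_Blake2s_free(st);
}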
diff --git a/include/msvc/Hacl_Hash_Blake2s_Simd128.h b/include/msvc/Hacl_Hash_Blake2s_Simd128.h
new file mode 100644
index 00000000..6484005e
--- /dev/null
+++ b/include/msvc/Hacl_Hash_Blake2s_Simd128.h
@@ -0,0 +1,112 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __Hacl_Hash_Blake2s_Simd128_H
+#define __Hacl_Hash_Blake2s_Simd128_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "Hacl_Streaming_Types.h"
+#include "libintvector.h"
+
+typedef struct Hacl_Hash_Blake2s_Simd128_block_state_t_s
+{
+  Lib_IntVector_Intrinsics_vec128 *fst;
+  Lib_IntVector_Intrinsics_vec128 *snd;
+}
+Hacl_Hash_Blake2s_Simd128_block_state_t;
+
+typedef struct Hacl_Hash_Blake2s_Simd128_state_t_s
+{
+  Hacl_Hash_Blake2s_Simd128_block_state_t block_state;
+  uint8_t *buf;
+  uint64_t total_len;
+}
+Hacl_Hash_Blake2s_Simd128_state_t;
+
+/**
+  State allocation function when there is no key
+*/
+Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void);
+
+/**
+  Re-initialization function when there is no key
+*/
+void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *state);
+
+/**
+  Update function when there is no key; 0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Hash_Blake2s_Simd128_update(
+  Hacl_Hash_Blake2s_Simd128_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
+);
+
+/**
+  Finish function when there is no key
+*/
+void
+Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8_t *output);
+
+/**
+  Free state function when there is no key
+*/
+void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state);
+
+/**
+Write the BLAKE2s digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
+void
+Hacl_Hash_Blake2s_Simd128_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
+);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __Hacl_Hash_Blake2s_Simd128_H_DEFINED
+#endif
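
As a quick illustration of the one-shot API declared above, here is a minimal, hypothetical caller for Hacl_Hash_Blake2s_Simd128_hash_with_key. This sketch is not part of the patch, assumes a target with VEC128 support, and uses a placeholder key buffer with key_len = 0 to request unkeyed hashing.

    #include <stdio.h>
    #include "Hacl_Hash_Blake2s_Simd128.h"

    int main(void)
    {
      uint8_t digest[32U] = { 0U };        /* 1 <= output_len <= 32 */
      uint8_t msg[3U] = { 'a', 'b', 'c' };
      uint8_t key[1U] = { 0U };            /* unused: key_len is 0 */
      Hacl_Hash_Blake2s_Simd128_hash_with_key(digest, 32U, msg, 3U, key, 0U);
      for (uint32_t i = 0U; i < 32U; i++)
        printf("%02x", digest[i]);
      printf("\n");
      return 0;
    }
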
diff --git a/include/msvc/Hacl_Hash_MD5.h b/include/msvc/Hacl_Hash_MD5.h
index dd4c75e0..db93d7d6 100644
--- a/include/msvc/Hacl_Hash_MD5.h
+++ b/include/msvc/Hacl_Hash_MD5.h
@@ -37,25 +37,25 @@ extern "C" {
 
 #include "Hacl_Streaming_Types.h"
 
-typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_MD5_state;
+typedef Hacl_Streaming_MD_state_32 Hacl_Hash_MD5_state_t;
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_create_in(void);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_malloc(void);
 
-void Hacl_Streaming_MD5_legacy_init(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_MD5_reset(Hacl_Streaming_MD_state_32 *state);
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len);
+Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len);
 
-void Hacl_Streaming_MD5_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+void Hacl_Hash_MD5_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output);
 
-void Hacl_Streaming_MD5_legacy_free(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_MD5_free(Hacl_Streaming_MD_state_32 *state);
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_copy(Hacl_Streaming_MD_state_32 *s0);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_copy(Hacl_Streaming_MD_state_32 *state);
 
-void Hacl_Streaming_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_MD5_hash(uint8_t *output, uint8_t *input, uint32_t input_len);
 
 #if defined(__cplusplus)
 }
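
The Hacl_Streaming_MD5_legacy_* entry points are renamed above, and the one-shot function now takes its output buffer first. A minimal streaming sketch against the new names (illustrative only; the 3-byte message is a placeholder):

    #include "Hacl_Hash_MD5.h"

    /* Hash "abc" incrementally with the renamed streaming API. */
    static void md5_example(uint8_t digest[16U])
    {
      uint8_t chunk[3U] = { 'a', 'b', 'c' };
      Hacl_Hash_MD5_state_t *st = Hacl_Hash_MD5_malloc();
      Hacl_Hash_MD5_update(st, chunk, 3U);   /* returns 0 on success */
      Hacl_Hash_MD5_digest(st, digest);
      Hacl_Hash_MD5_free(st);
    }
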
diff --git a/include/msvc/Hacl_Hash_SHA1.h b/include/msvc/Hacl_Hash_SHA1.h
index 2737b20f..19045440 100644
--- a/include/msvc/Hacl_Hash_SHA1.h
+++ b/include/msvc/Hacl_Hash_SHA1.h
@@ -37,25 +37,25 @@ extern "C" {
 
 #include "Hacl_Streaming_Types.h"
 
-typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_SHA1_state;
+typedef Hacl_Streaming_MD_state_32 Hacl_Hash_SHA1_state_t;
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_create_in(void);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_malloc(void);
 
-void Hacl_Streaming_SHA1_legacy_init(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA1_reset(Hacl_Streaming_MD_state_32 *state);
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len);
+Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len);
 
-void Hacl_Streaming_SHA1_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+void Hacl_Hash_SHA1_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output);
 
-void Hacl_Streaming_SHA1_legacy_free(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA1_free(Hacl_Streaming_MD_state_32 *state);
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_state_32 *s0);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_copy(Hacl_Streaming_MD_state_32 *state);
 
-void Hacl_Streaming_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA1_hash(uint8_t *output, uint8_t *input, uint32_t input_len);
 
 #if defined(__cplusplus)
 }
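
The same renaming applies to SHA-1; in particular, the one-shot function now places `output` before `input`, so existing callers must swap arguments rather than merely rename. A hedged before/after sketch:

    #include "Hacl_Hash_SHA1.h"

    static void sha1_example(void)
    {
      uint8_t msg[3U] = { 'a', 'b', 'c' };
      uint8_t digest[20U] = { 0U };
      /* Old: Hacl_Streaming_SHA1_legacy_hash(msg, 3U, digest); */
      Hacl_Hash_SHA1_hash(digest, msg, 3U);
    }
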
diff --git a/include/msvc/Hacl_Hash_SHA2.h b/include/msvc/Hacl_Hash_SHA2.h
index 8f98d878..1c2fab71 100644
--- a/include/msvc/Hacl_Hash_SHA2.h
+++ b/include/msvc/Hacl_Hash_SHA2.h
@@ -38,19 +38,19 @@ extern "C" {
 #include "Hacl_Streaming_Types.h"
 #include "Hacl_Krmllib.h"
 
-typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_SHA2_state_sha2_224;
+typedef Hacl_Streaming_MD_state_32 Hacl_Hash_SHA2_state_sha2_224;
 
-typedef Hacl_Streaming_MD_state_32 Hacl_Streaming_SHA2_state_sha2_256;
+typedef Hacl_Streaming_MD_state_32 Hacl_Hash_SHA2_state_sha2_256;
 
-typedef Hacl_Streaming_MD_state_64 Hacl_Streaming_SHA2_state_sha2_384;
+typedef Hacl_Streaming_MD_state_64 Hacl_Hash_SHA2_state_sha2_384;
 
-typedef Hacl_Streaming_MD_state_64 Hacl_Streaming_SHA2_state_sha2_512;
+typedef Hacl_Streaming_MD_state_64 Hacl_Hash_SHA2_state_sha2_512;
 
 /**
 Allocate initial state for the SHA2_256 hash. The state is to be freed by
 calling `free_256`.
 */
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_256(void);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_256(void);
 
 /**
 Copies the state passed as argument into a newly allocated state (deep copy).
@@ -58,73 +58,73 @@ The state is to be freed by calling `free_256`. Cloning the state this way is
 useful, for instance, if your control-flow diverges and you need to feed
 more (different) data into the hash in each branch.
 */
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_MD_state_32 *s0);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_copy_256(Hacl_Streaming_MD_state_32 *state);
 
 /**
 Reset an existing state to the initial hash state with empty data.
 */
-void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA2_reset_256(Hacl_Streaming_MD_state_32 *state);
 
 /**
 Feed an arbitrary amount of data into the hash. This function returns 0 for
 success, or 1 if the combined length of all of the data passed to `update_256`
-(since the last call to `init_256`) exceeds 2^61-1 bytes.
+(since the last call to `reset_256`) exceeds 2^61-1 bytes.
 
 This function is identical to the update function for SHA2_224.
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_256(
-  Hacl_Streaming_MD_state_32 *p,
+Hacl_Hash_SHA2_update_256(
+  Hacl_Streaming_MD_state_32 *state,
   uint8_t *input,
   uint32_t input_len
 );
 
 /**
-Write the resulting hash into `dst`, an array of 32 bytes. The state remains
-valid after a call to `finish_256`, meaning the user may feed more data into
-the hash via `update_256`. (The finish_256 function operates on an internal copy of
+Write the resulting hash into `output`, an array of 32 bytes. The state remains
+valid after a call to `digest_256`, meaning the user may feed more data into
+the hash via `update_256`. (The digest_256 function operates on an internal copy of
 the state and therefore does not invalidate the client-held state `p`.)
 */
-void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+void Hacl_Hash_SHA2_digest_256(Hacl_Streaming_MD_state_32 *state, uint8_t *output);
 
 /**
-Free a state allocated with `create_in_256`.
+Free a state allocated with `malloc_256`.
 
 This function is identical to the free function for SHA2_224.
 */
-void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA2_free_256(Hacl_Streaming_MD_state_32 *state);
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 32 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 32 bytes.
 */
-void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA2_hash_256(uint8_t *output, uint8_t *input, uint32_t input_len);
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_224(void);
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_224(void);
 
-void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_MD_state_32 *s);
+void Hacl_Hash_SHA2_reset_224(Hacl_Streaming_MD_state_32 *state);
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_224(
-  Hacl_Streaming_MD_state_32 *p,
+Hacl_Hash_SHA2_update_224(
+  Hacl_Streaming_MD_state_32 *state,
   uint8_t *input,
   uint32_t input_len
 );
 
 /**
-Write the resulting hash into `dst`, an array of 28 bytes. The state remains
-valid after a call to `finish_224`, meaning the user may feed more data into
+Write the resulting hash into `output`, an array of 28 bytes. The state remains
+valid after a call to `digest_224`, meaning the user may feed more data into
 the hash via `update_224`.
 */
-void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_MD_state_32 *p, uint8_t *dst);
+void Hacl_Hash_SHA2_digest_224(Hacl_Streaming_MD_state_32 *state, uint8_t *output);
 
-void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_MD_state_32 *p);
+void Hacl_Hash_SHA2_free_224(Hacl_Streaming_MD_state_32 *state);
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 28 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 28 bytes.
 */
-void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA2_hash_224(uint8_t *output, uint8_t *input, uint32_t input_len);
 
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_512(void);
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_512(void);
 
 /**
 Copies the state passed as argument into a newly allocated state (deep copy).
@@ -132,68 +132,68 @@ The state is to be freed by calling `free_512`. Cloning the state this way is
 useful, for instance, if your control-flow diverges and you need to feed
 more (different) data into the hash in each branch.
 */
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_copy_512(Hacl_Streaming_MD_state_64 *s0);
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_copy_512(Hacl_Streaming_MD_state_64 *state);
 
-void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_MD_state_64 *s);
+void Hacl_Hash_SHA2_reset_512(Hacl_Streaming_MD_state_64 *state);
 
 /**
 Feed an arbitrary amount of data into the hash. This function returns 0 for
 success, or 1 if the combined length of all of the data passed to `update_512`
-(since the last call to `init_512`) exceeds 2^125-1 bytes.
+(since the last call to `reset_512`) exceeds 2^125-1 bytes.
 
 This function is identical to the update function for SHA2_384.
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_512(
-  Hacl_Streaming_MD_state_64 *p,
+Hacl_Hash_SHA2_update_512(
+  Hacl_Streaming_MD_state_64 *state,
   uint8_t *input,
   uint32_t input_len
 );
 
 /**
-Write the resulting hash into `dst`, an array of 64 bytes. The state remains
-valid after a call to `finish_512`, meaning the user may feed more data into
-the hash via `update_512`. (The finish_512 function operates on an internal copy of
+Write the resulting hash into `output`, an array of 64 bytes. The state remains
+valid after a call to `digest_512`, meaning the user may feed more data into
+the hash via `update_512`. (The digest_512 function operates on an internal copy of
 the state and therefore does not invalidate the client-held state `p`.)
 */
-void Hacl_Streaming_SHA2_finish_512(Hacl_Streaming_MD_state_64 *p, uint8_t *dst);
+void Hacl_Hash_SHA2_digest_512(Hacl_Streaming_MD_state_64 *state, uint8_t *output);
 
 /**
-Free a state allocated with `create_in_512`.
+Free a state allocated with `malloc_512`.
 
 This function is identical to the free function for SHA2_384.
 */
-void Hacl_Streaming_SHA2_free_512(Hacl_Streaming_MD_state_64 *s);
+void Hacl_Hash_SHA2_free_512(Hacl_Streaming_MD_state_64 *state);
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 64 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 64 bytes.
 */
-void Hacl_Streaming_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA2_hash_512(uint8_t *output, uint8_t *input, uint32_t input_len);
 
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_384(void);
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_384(void);
 
-void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_MD_state_64 *s);
+void Hacl_Hash_SHA2_reset_384(Hacl_Streaming_MD_state_64 *state);
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_384(
-  Hacl_Streaming_MD_state_64 *p,
+Hacl_Hash_SHA2_update_384(
+  Hacl_Streaming_MD_state_64 *state,
   uint8_t *input,
   uint32_t input_len
 );
 
 /**
-Write the resulting hash into `dst`, an array of 48 bytes. The state remains
-valid after a call to `finish_384`, meaning the user may feed more data into
+Write the resulting hash into `output`, an array of 48 bytes. The state remains
+valid after a call to `digest_384`, meaning the user may feed more data into
 the hash via `update_384`.
 */
-void Hacl_Streaming_SHA2_finish_384(Hacl_Streaming_MD_state_64 *p, uint8_t *dst);
+void Hacl_Hash_SHA2_digest_384(Hacl_Streaming_MD_state_64 *state, uint8_t *output);
 
-void Hacl_Streaming_SHA2_free_384(Hacl_Streaming_MD_state_64 *p);
+void Hacl_Hash_SHA2_free_384(Hacl_Streaming_MD_state_64 *state);
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 48 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 48 bytes.
 */
-void Hacl_Streaming_SHA2_hash_384(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void Hacl_Hash_SHA2_hash_384(uint8_t *output, uint8_t *input, uint32_t input_len);
 
 #if defined(__cplusplus)
 }
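
Since the comments above single out `copy_256` for diverging control flow, here is a hedged sketch of that pattern using the renamed SHA-256 entry points (the data fed into each branch is hypothetical):

    #include "Hacl_Hash_SHA2.h"

    static void sha256_branching_example(uint8_t out_a[32U], uint8_t out_b[32U])
    {
      uint8_t common[4U] = { 'd', 'a', 't', 'a' };
      uint8_t tail_a[1U] = { 'A' };
      uint8_t tail_b[1U] = { 'B' };

      Hacl_Streaming_MD_state_32 *st = Hacl_Hash_SHA2_malloc_256();
      Hacl_Hash_SHA2_update_256(st, common, 4U);             /* shared prefix */

      /* Deep copy so each branch can keep feeding data independently. */
      Hacl_Streaming_MD_state_32 *st2 = Hacl_Hash_SHA2_copy_256(st);
      Hacl_Hash_SHA2_update_256(st, tail_a, 1U);
      Hacl_Hash_SHA2_update_256(st2, tail_b, 1U);

      Hacl_Hash_SHA2_digest_256(st, out_a);
      Hacl_Hash_SHA2_digest_256(st2, out_b);

      Hacl_Hash_SHA2_free_256(st);
      Hacl_Hash_SHA2_free_256(st2);
    }
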
diff --git a/include/msvc/Hacl_Hash_SHA3.h b/include/msvc/Hacl_Hash_SHA3.h
index e2f5ff06..19123304 100644
--- a/include/msvc/Hacl_Hash_SHA3.h
+++ b/include/msvc/Hacl_Hash_SHA3.h
@@ -37,48 +37,48 @@ extern "C" {
 
 #include "Hacl_Streaming_Types.h"
 
-typedef struct Hacl_Streaming_Keccak_hash_buf_s
+typedef struct Hacl_Hash_SHA3_hash_buf_s
 {
   Spec_Hash_Definitions_hash_alg fst;
   uint64_t *snd;
 }
-Hacl_Streaming_Keccak_hash_buf;
+Hacl_Hash_SHA3_hash_buf;
 
-typedef struct Hacl_Streaming_Keccak_state_s
+typedef struct Hacl_Hash_SHA3_state_t_s
 {
-  Hacl_Streaming_Keccak_hash_buf block_state;
+  Hacl_Hash_SHA3_hash_buf block_state;
   uint8_t *buf;
   uint64_t total_len;
 }
-Hacl_Streaming_Keccak_state;
+Hacl_Hash_SHA3_state_t;
 
-Spec_Hash_Definitions_hash_alg Hacl_Streaming_Keccak_get_alg(Hacl_Streaming_Keccak_state *s);
+Spec_Hash_Definitions_hash_alg Hacl_Hash_SHA3_get_alg(Hacl_Hash_SHA3_state_t *s);
 
-Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_hash_alg a);
+Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_hash_alg a);
 
-void Hacl_Streaming_Keccak_free(Hacl_Streaming_Keccak_state *s);
+void Hacl_Hash_SHA3_free(Hacl_Hash_SHA3_state_t *state);
 
-Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_copy(Hacl_Streaming_Keccak_state *s0);
+Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_copy(Hacl_Hash_SHA3_state_t *state);
 
-void Hacl_Streaming_Keccak_reset(Hacl_Streaming_Keccak_state *s);
+void Hacl_Hash_SHA3_reset(Hacl_Hash_SHA3_state_t *state);
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint32_t len);
+Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t chunk_len);
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_finish(Hacl_Streaming_Keccak_state *s, uint8_t *dst);
+Hacl_Hash_SHA3_digest(Hacl_Hash_SHA3_state_t *state, uint8_t *output);
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_squeeze(Hacl_Streaming_Keccak_state *s, uint8_t *dst, uint32_t l);
+Hacl_Hash_SHA3_squeeze(Hacl_Hash_SHA3_state_t *s, uint8_t *dst, uint32_t l);
 
-uint32_t Hacl_Streaming_Keccak_block_len(Hacl_Streaming_Keccak_state *s);
+uint32_t Hacl_Hash_SHA3_block_len(Hacl_Hash_SHA3_state_t *s);
 
-uint32_t Hacl_Streaming_Keccak_hash_len(Hacl_Streaming_Keccak_state *s);
+uint32_t Hacl_Hash_SHA3_hash_len(Hacl_Hash_SHA3_state_t *s);
 
-bool Hacl_Streaming_Keccak_is_shake(Hacl_Streaming_Keccak_state *s);
+bool Hacl_Hash_SHA3_is_shake(Hacl_Hash_SHA3_state_t *s);
 
 void
-Hacl_SHA3_shake128_hacl(
+Hacl_Hash_SHA3_shake128_hacl(
   uint32_t inputByteLen,
   uint8_t *input,
   uint32_t outputByteLen,
@@ -86,25 +86,25 @@ Hacl_SHA3_shake128_hacl(
 );
 
 void
-Hacl_SHA3_shake256_hacl(
+Hacl_Hash_SHA3_shake256_hacl(
   uint32_t inputByteLen,
   uint8_t *input,
   uint32_t outputByteLen,
   uint8_t *output
 );
 
-void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+void Hacl_Hash_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
 
-void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+void Hacl_Hash_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
 
-void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+void Hacl_Hash_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
 
-void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
+void Hacl_Hash_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output);
 
-void Hacl_Impl_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s);
+void Hacl_Hash_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s);
 
 void
-Hacl_Impl_SHA3_squeeze(
+Hacl_Hash_SHA3_squeeze0(
   uint64_t *s,
   uint32_t rateInBytes,
   uint32_t outputByteLen,
@@ -112,7 +112,7 @@ Hacl_Impl_SHA3_squeeze(
 );
 
 void
-Hacl_Impl_SHA3_keccak(
+Hacl_Hash_SHA3_keccak(
   uint32_t rate,
   uint32_t capacity,
   uint32_t inputByteLen,
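
To make the renamed flat SHA-3 API above concrete, a small illustrative caller for the fixed-output and extendable-output variants (not part of the patch; message and output lengths are arbitrary):

    #include "Hacl_Hash_SHA3.h"

    static void sha3_example(void)
    {
      uint8_t msg[3U] = { 'a', 'b', 'c' };
      uint8_t digest[32U] = { 0U };
      uint8_t xof_out[64U] = { 0U };

      /* Argument order is (inputByteLen, input, output). */
      Hacl_Hash_SHA3_sha3_256(3U, msg, digest);
      /* SHAKE-128 with a caller-chosen output length. */
      Hacl_Hash_SHA3_shake128_hacl(3U, msg, 64U, xof_out);
    }
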
diff --git a/include/msvc/Hacl_IntTypes_Intrinsics.h b/include/msvc/Hacl_IntTypes_Intrinsics.h
index e2a193e9..c816b046 100644
--- a/include/msvc/Hacl_IntTypes_Intrinsics.h
+++ b/include/msvc/Hacl_IntTypes_Intrinsics.h
@@ -41,7 +41,7 @@ static inline uint32_t
 Hacl_IntTypes_Intrinsics_add_carry_u32(uint32_t cin, uint32_t x, uint32_t y, uint32_t *r)
 {
   uint64_t res = (uint64_t)x + (uint64_t)cin + (uint64_t)y;
-  uint32_t c = (uint32_t)(res >> (uint32_t)32U);
+  uint32_t c = (uint32_t)(res >> 32U);
   r[0U] = (uint32_t)res;
   return c;
 }
@@ -50,7 +50,7 @@ static inline uint32_t
 Hacl_IntTypes_Intrinsics_sub_borrow_u32(uint32_t cin, uint32_t x, uint32_t y, uint32_t *r)
 {
   uint64_t res = (uint64_t)x - (uint64_t)y - (uint64_t)cin;
-  uint32_t c = (uint32_t)(res >> (uint32_t)32U) & (uint32_t)1U;
+  uint32_t c = (uint32_t)(res >> 32U) & 1U;
   r[0U] = (uint32_t)res;
   return c;
 }
@@ -59,8 +59,7 @@ static inline uint64_t
 Hacl_IntTypes_Intrinsics_add_carry_u64(uint64_t cin, uint64_t x, uint64_t y, uint64_t *r)
 {
   uint64_t res = x + cin + y;
-  uint64_t
-  c = (~FStar_UInt64_gte_mask(res, x) | (FStar_UInt64_eq_mask(res, x) & cin)) & (uint64_t)1U;
+  uint64_t c = (~FStar_UInt64_gte_mask(res, x) | (FStar_UInt64_eq_mask(res, x) & cin)) & 1ULL;
   r[0U] = res;
   return c;
 }
@@ -73,7 +72,7 @@ Hacl_IntTypes_Intrinsics_sub_borrow_u64(uint64_t cin, uint64_t x, uint64_t y, ui
   c =
     ((FStar_UInt64_gte_mask(res, x) & ~FStar_UInt64_eq_mask(res, x))
     | (FStar_UInt64_eq_mask(res, x) & cin))
-    & (uint64_t)1U;
+    & 1ULL;
   r[0U] = res;
   return c;
 }
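
The carry convention above (each intrinsic returns a 0/1 carry that is fed back in as `cin`) is easiest to see chained across limbs. A hedged sketch adding two 128-bit values held as two little-endian 64-bit limbs (the helper and its limb layout are hypothetical):

    #include "Hacl_IntTypes_Intrinsics.h"

    static uint64_t add_u128(const uint64_t a[2U], const uint64_t b[2U], uint64_t res[2U])
    {
      uint64_t c = Hacl_IntTypes_Intrinsics_add_carry_u64(0ULL, a[0U], b[0U], &res[0U]);
      /* The returned carry feeds the next limb as cin. */
      c = Hacl_IntTypes_Intrinsics_add_carry_u64(c, a[1U], b[1U], &res[1U]);
      return c; /* final carry out */
    }
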
diff --git a/include/msvc/Hacl_IntTypes_Intrinsics_128.h b/include/msvc/Hacl_IntTypes_Intrinsics_128.h
index aa843a6c..d3008969 100644
--- a/include/msvc/Hacl_IntTypes_Intrinsics_128.h
+++ b/include/msvc/Hacl_IntTypes_Intrinsics_128.h
@@ -45,7 +45,7 @@ Hacl_IntTypes_Intrinsics_128_add_carry_u64(uint64_t cin, uint64_t x, uint64_t y,
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_uint64_to_uint128(x),
         FStar_UInt128_uint64_to_uint128(cin)),
       FStar_UInt128_uint64_to_uint128(y));
-  uint64_t c = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U));
+  uint64_t c = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U));
   r[0U] = FStar_UInt128_uint128_to_uint64(res);
   return c;
 }
@@ -58,10 +58,7 @@ Hacl_IntTypes_Intrinsics_128_sub_borrow_u64(uint64_t cin, uint64_t x, uint64_t y
     FStar_UInt128_sub_mod(FStar_UInt128_sub_mod(FStar_UInt128_uint64_to_uint128(x),
         FStar_UInt128_uint64_to_uint128(y)),
       FStar_UInt128_uint64_to_uint128(cin));
-  uint64_t
-  c =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U))
-    & (uint64_t)1U;
+  uint64_t c = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U)) & 1ULL;
   r[0U] = FStar_UInt128_uint128_to_uint64(res);
   return c;
 }
diff --git a/include/msvc/Hacl_Streaming_Poly1305_32.h b/include/msvc/Hacl_MAC_Poly1305.h
similarity index 67%
rename from include/msvc/Hacl_Streaming_Poly1305_32.h
rename to include/msvc/Hacl_MAC_Poly1305.h
index 88d1a513..95ac4be2 100644
--- a/include/msvc/Hacl_Streaming_Poly1305_32.h
+++ b/include/msvc/Hacl_MAC_Poly1305.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Streaming_Poly1305_32_H
-#define __Hacl_Streaming_Poly1305_32_H
+#ifndef __Hacl_MAC_Poly1305_H
+#define __Hacl_MAC_Poly1305_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -36,43 +36,36 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Streaming_Types.h"
-#include "Hacl_Poly1305_32.h"
+#include "Hacl_Krmllib.h"
 
-typedef struct Hacl_Streaming_Poly1305_32_poly1305_32_state_s
+typedef struct Hacl_MAC_Poly1305_state_t_s
 {
   uint64_t *block_state;
   uint8_t *buf;
   uint64_t total_len;
   uint8_t *p_key;
 }
-Hacl_Streaming_Poly1305_32_poly1305_32_state;
+Hacl_MAC_Poly1305_state_t;
 
-Hacl_Streaming_Poly1305_32_poly1305_32_state *Hacl_Streaming_Poly1305_32_create_in(uint8_t *k);
+Hacl_MAC_Poly1305_state_t *Hacl_MAC_Poly1305_malloc(uint8_t *key);
 
-void
-Hacl_Streaming_Poly1305_32_init(uint8_t *k, Hacl_Streaming_Poly1305_32_poly1305_32_state *s);
+void Hacl_MAC_Poly1305_reset(Hacl_MAC_Poly1305_state_t *state, uint8_t *key);
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Poly1305_32_update(
-  Hacl_Streaming_Poly1305_32_poly1305_32_state *p,
-  uint8_t *data,
-  uint32_t len
-);
+Hacl_MAC_Poly1305_update(Hacl_MAC_Poly1305_state_t *state, uint8_t *chunk, uint32_t chunk_len);
 
-void
-Hacl_Streaming_Poly1305_32_finish(
-  Hacl_Streaming_Poly1305_32_poly1305_32_state *p,
-  uint8_t *dst
-);
+void Hacl_MAC_Poly1305_digest(Hacl_MAC_Poly1305_state_t *state, uint8_t *output);
 
-void Hacl_Streaming_Poly1305_32_free(Hacl_Streaming_Poly1305_32_poly1305_32_state *s);
+void Hacl_MAC_Poly1305_free(Hacl_MAC_Poly1305_state_t *state);
+
+void Hacl_MAC_Poly1305_mac(uint8_t *output, uint8_t *input, uint32_t input_len, uint8_t *key);
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Streaming_Poly1305_32_H_DEFINED
+#define __Hacl_MAC_Poly1305_H_DEFINED
 #endif
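
A minimal one-shot caller for `Hacl_MAC_Poly1305_mac` as declared above (illustrative; the all-zero key and message are placeholders, not test vectors):

    #include "Hacl_MAC_Poly1305.h"

    static void poly1305_example(void)
    {
      uint8_t key[32U] = { 0U };   /* 32-byte one-time key */
      uint8_t msg[16U] = { 0U };
      uint8_t tag[16U] = { 0U };
      Hacl_MAC_Poly1305_mac(tag, msg, 16U, key);
    }
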
diff --git a/include/msvc/Hacl_Streaming_Poly1305_128.h b/include/msvc/Hacl_MAC_Poly1305_Simd128.h
similarity index 67%
rename from include/msvc/Hacl_Streaming_Poly1305_128.h
rename to include/msvc/Hacl_MAC_Poly1305_Simd128.h
index d6299052..9b69ebd4 100644
--- a/include/msvc/Hacl_Streaming_Poly1305_128.h
+++ b/include/msvc/Hacl_MAC_Poly1305_Simd128.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Streaming_Poly1305_128_H
-#define __Hacl_Streaming_Poly1305_128_H
+#ifndef __Hacl_MAC_Poly1305_Simd128_H
+#define __Hacl_MAC_Poly1305_Simd128_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -36,44 +36,47 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Streaming_Types.h"
-#include "Hacl_Poly1305_128.h"
+#include "libintvector.h"
 
-typedef struct Hacl_Streaming_Poly1305_128_poly1305_128_state_s
+typedef struct Hacl_MAC_Poly1305_Simd128_state_t_s
 {
   Lib_IntVector_Intrinsics_vec128 *block_state;
   uint8_t *buf;
   uint64_t total_len;
   uint8_t *p_key;
 }
-Hacl_Streaming_Poly1305_128_poly1305_128_state;
+Hacl_MAC_Poly1305_Simd128_state_t;
 
-Hacl_Streaming_Poly1305_128_poly1305_128_state
-*Hacl_Streaming_Poly1305_128_create_in(uint8_t *k);
+Hacl_MAC_Poly1305_Simd128_state_t *Hacl_MAC_Poly1305_Simd128_malloc(uint8_t *key);
 
-void
-Hacl_Streaming_Poly1305_128_init(uint8_t *k, Hacl_Streaming_Poly1305_128_poly1305_128_state *s);
+void Hacl_MAC_Poly1305_Simd128_reset(Hacl_MAC_Poly1305_Simd128_state_t *state, uint8_t *key);
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Poly1305_128_update(
-  Hacl_Streaming_Poly1305_128_poly1305_128_state *p,
-  uint8_t *data,
-  uint32_t len
+Hacl_MAC_Poly1305_Simd128_update(
+  Hacl_MAC_Poly1305_Simd128_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
 );
 
 void
-Hacl_Streaming_Poly1305_128_finish(
-  Hacl_Streaming_Poly1305_128_poly1305_128_state *p,
-  uint8_t *dst
-);
+Hacl_MAC_Poly1305_Simd128_digest(Hacl_MAC_Poly1305_Simd128_state_t *state, uint8_t *output);
+
+void Hacl_MAC_Poly1305_Simd128_free(Hacl_MAC_Poly1305_Simd128_state_t *state);
 
-void Hacl_Streaming_Poly1305_128_free(Hacl_Streaming_Poly1305_128_poly1305_128_state *s);
+void
+Hacl_MAC_Poly1305_Simd128_mac(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key
+);
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Streaming_Poly1305_128_H_DEFINED
+#define __Hacl_MAC_Poly1305_Simd128_H_DEFINED
 #endif
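
The streaming interface keeps the same shape after the rename: allocate with the key, feed chunks, extract the tag, then free. A hedged sketch against the SIMD128 declarations above:

    #include "Hacl_MAC_Poly1305_Simd128.h"

    static void poly1305_simd128_example(uint8_t tag[16U], uint8_t *data, uint32_t data_len)
    {
      uint8_t key[32U] = { 0U };   /* placeholder key */
      Hacl_MAC_Poly1305_Simd128_state_t *st = Hacl_MAC_Poly1305_Simd128_malloc(key);
      Hacl_MAC_Poly1305_Simd128_update(st, data, data_len);   /* 0 = success */
      Hacl_MAC_Poly1305_Simd128_digest(st, tag);
      Hacl_MAC_Poly1305_Simd128_free(st);
    }
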
diff --git a/include/msvc/Hacl_Streaming_Poly1305_256.h b/include/msvc/Hacl_MAC_Poly1305_Simd256.h
similarity index 67%
rename from include/msvc/Hacl_Streaming_Poly1305_256.h
rename to include/msvc/Hacl_MAC_Poly1305_Simd256.h
index 689b837b..89f4a104 100644
--- a/include/msvc/Hacl_Streaming_Poly1305_256.h
+++ b/include/msvc/Hacl_MAC_Poly1305_Simd256.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Streaming_Poly1305_256_H
-#define __Hacl_Streaming_Poly1305_256_H
+#ifndef __Hacl_MAC_Poly1305_Simd256_H
+#define __Hacl_MAC_Poly1305_Simd256_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -36,44 +36,47 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Streaming_Types.h"
-#include "Hacl_Poly1305_256.h"
+#include "libintvector.h"
 
-typedef struct Hacl_Streaming_Poly1305_256_poly1305_256_state_s
+typedef struct Hacl_MAC_Poly1305_Simd256_state_t_s
 {
   Lib_IntVector_Intrinsics_vec256 *block_state;
   uint8_t *buf;
   uint64_t total_len;
   uint8_t *p_key;
 }
-Hacl_Streaming_Poly1305_256_poly1305_256_state;
+Hacl_MAC_Poly1305_Simd256_state_t;
 
-Hacl_Streaming_Poly1305_256_poly1305_256_state
-*Hacl_Streaming_Poly1305_256_create_in(uint8_t *k);
+Hacl_MAC_Poly1305_Simd256_state_t *Hacl_MAC_Poly1305_Simd256_malloc(uint8_t *key);
 
-void
-Hacl_Streaming_Poly1305_256_init(uint8_t *k, Hacl_Streaming_Poly1305_256_poly1305_256_state *s);
+void Hacl_MAC_Poly1305_Simd256_reset(Hacl_MAC_Poly1305_Simd256_state_t *state, uint8_t *key);
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Poly1305_256_update(
-  Hacl_Streaming_Poly1305_256_poly1305_256_state *p,
-  uint8_t *data,
-  uint32_t len
+Hacl_MAC_Poly1305_Simd256_update(
+  Hacl_MAC_Poly1305_Simd256_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
 );
 
 void
-Hacl_Streaming_Poly1305_256_finish(
-  Hacl_Streaming_Poly1305_256_poly1305_256_state *p,
-  uint8_t *dst
-);
+Hacl_MAC_Poly1305_Simd256_digest(Hacl_MAC_Poly1305_Simd256_state_t *state, uint8_t *output);
+
+void Hacl_MAC_Poly1305_Simd256_free(Hacl_MAC_Poly1305_Simd256_state_t *state);
 
-void Hacl_Streaming_Poly1305_256_free(Hacl_Streaming_Poly1305_256_poly1305_256_state *s);
+void
+Hacl_MAC_Poly1305_Simd256_mac(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key
+);
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Streaming_Poly1305_256_H_DEFINED
+#define __Hacl_MAC_Poly1305_Simd256_H_DEFINED
 #endif
diff --git a/include/msvc/Hacl_NaCl.h b/include/msvc/Hacl_NaCl.h
index b7e91a4b..a3ca6804 100644
--- a/include/msvc/Hacl_NaCl.h
+++ b/include/msvc/Hacl_NaCl.h
@@ -36,7 +36,7 @@ extern "C" {
 #include "krml/internal/target.h"
 
 #include "Hacl_Salsa20.h"
-#include "Hacl_Poly1305_32.h"
+#include "Hacl_MAC_Poly1305.h"
 #include "Hacl_Curve25519_51.h"
 
 /**
diff --git a/include/msvc/Hacl_Poly1305_128.h b/include/msvc/Hacl_Poly1305_128.h
deleted file mode 100644
index 834d4a8a..00000000
--- a/include/msvc/Hacl_Poly1305_128.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Poly1305_128_H
-#define __Hacl_Poly1305_128_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-#include "libintvector.h"
-
-typedef Lib_IntVector_Intrinsics_vec128 *Hacl_Poly1305_128_poly1305_ctx;
-
-void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key);
-
-void Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *text);
-
-void
-Hacl_Poly1305_128_poly1305_update(
-  Lib_IntVector_Intrinsics_vec128 *ctx,
-  uint32_t len,
-  uint8_t *text
-);
-
-void
-Hacl_Poly1305_128_poly1305_finish(
-  uint8_t *tag,
-  uint8_t *key,
-  Lib_IntVector_Intrinsics_vec128 *ctx
-);
-
-void Hacl_Poly1305_128_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Poly1305_128_H_DEFINED
-#endif
diff --git a/include/msvc/Hacl_Poly1305_32.h b/include/msvc/Hacl_Poly1305_32.h
deleted file mode 100644
index f3233b90..00000000
--- a/include/msvc/Hacl_Poly1305_32.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Poly1305_32_H
-#define __Hacl_Poly1305_32_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-#include "Hacl_Krmllib.h"
-
-typedef uint64_t *Hacl_Poly1305_32_poly1305_ctx;
-
-void Hacl_Poly1305_32_poly1305_init(uint64_t *ctx, uint8_t *key);
-
-void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text);
-
-void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text);
-
-void Hacl_Poly1305_32_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx);
-
-void Hacl_Poly1305_32_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Poly1305_32_H_DEFINED
-#endif
diff --git a/include/msvc/Hacl_Streaming_Blake2.h b/include/msvc/Hacl_Streaming_Blake2.h
deleted file mode 100644
index bfb05e4f..00000000
--- a/include/msvc/Hacl_Streaming_Blake2.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#ifndef __Hacl_Streaming_Blake2_H
-#define __Hacl_Streaming_Blake2_H
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#include <string.h>
-#include "krml/internal/types.h"
-#include "krml/lowstar_endianness.h"
-#include "krml/internal/target.h"
-
-#include "Hacl_Streaming_Types.h"
-#include "Hacl_Krmllib.h"
-#include "Hacl_Hash_Blake2.h"
-
-typedef struct Hacl_Streaming_Blake2_blake2s_32_block_state_s
-{
-  uint32_t *fst;
-  uint32_t *snd;
-}
-Hacl_Streaming_Blake2_blake2s_32_block_state;
-
-typedef struct Hacl_Streaming_Blake2_blake2b_32_block_state_s
-{
-  uint64_t *fst;
-  uint64_t *snd;
-}
-Hacl_Streaming_Blake2_blake2b_32_block_state;
-
-typedef struct Hacl_Streaming_Blake2_blake2s_32_state_s
-{
-  Hacl_Streaming_Blake2_blake2s_32_block_state block_state;
-  uint8_t *buf;
-  uint64_t total_len;
-}
-Hacl_Streaming_Blake2_blake2s_32_state;
-
-typedef struct Hacl_Streaming_Blake2_blake2b_32_state_s
-{
-  Hacl_Streaming_Blake2_blake2b_32_block_state block_state;
-  uint8_t *buf;
-  uint64_t total_len;
-}
-Hacl_Streaming_Blake2_blake2b_32_state;
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2_blake2s_32_state
-*Hacl_Streaming_Blake2_blake2s_32_no_key_create_in(void);
-
-/**
-  (Re-)initialization function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2s_32_no_key_init(Hacl_Streaming_Blake2_blake2s_32_state *s1);
-
-/**
-  Update function when there is no key; 0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2_blake2s_32_no_key_update(
-  Hacl_Streaming_Blake2_blake2s_32_state *p,
-  uint8_t *data,
-  uint32_t len
-);
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2_blake2s_32_no_key_finish(
-  Hacl_Streaming_Blake2_blake2s_32_state *p,
-  uint8_t *dst
-);
-
-/**
-  Free state function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2s_32_no_key_free(Hacl_Streaming_Blake2_blake2s_32_state *s1);
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2_blake2b_32_state
-*Hacl_Streaming_Blake2_blake2b_32_no_key_create_in(void);
-
-/**
-  (Re)-initialization function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2b_32_no_key_init(Hacl_Streaming_Blake2_blake2b_32_state *s1);
-
-/**
-  Update function when there is no key; 0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2_blake2b_32_no_key_update(
-  Hacl_Streaming_Blake2_blake2b_32_state *p,
-  uint8_t *data,
-  uint32_t len
-);
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2_blake2b_32_no_key_finish(
-  Hacl_Streaming_Blake2_blake2b_32_state *p,
-  uint8_t *dst
-);
-
-/**
-  Free state function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2b_32_no_key_free(Hacl_Streaming_Blake2_blake2b_32_state *s1);
-
-#if defined(__cplusplus)
-}
-#endif
-
-#define __Hacl_Streaming_Blake2_H_DEFINED
-#endif
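
The keyless BLAKE2s streaming functions deleted here have counterparts whose names are assumed to mirror the `Hacl_Hash_Blake2s_Simd128` header shown earlier in this patch (`malloc`/`reset`/`update`/`digest`/`free` in `Hacl_Hash_Blake2s.h`); the sketch below rests on that assumption and is not taken from the patch itself:

    #include "Hacl_Hash_Blake2s.h"

    /* Replaces Hacl_Streaming_Blake2_blake2s_32_no_key_{create_in,update,finish,free}. */
    static void blake2s_streaming_example(uint8_t digest[32U], uint8_t *data, uint32_t data_len)
    {
      Hacl_Hash_Blake2s_state_t *st = Hacl_Hash_Blake2s_malloc();   /* assumed name */
      Hacl_Hash_Blake2s_update(st, data, data_len);                 /* 0 = success */
      Hacl_Hash_Blake2s_digest(st, digest);
      Hacl_Hash_Blake2s_free(st);
    }
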
diff --git a/include/msvc/internal/EverCrypt_HMAC.h b/include/msvc/internal/EverCrypt_HMAC.h
index 02986e6c..debea462 100644
--- a/include/msvc/internal/EverCrypt_HMAC.h
+++ b/include/msvc/internal/EverCrypt_HMAC.h
@@ -38,7 +38,9 @@ extern "C" {
 #include "internal/Hacl_Krmllib.h"
 #include "internal/Hacl_Hash_SHA2.h"
 #include "internal/Hacl_Hash_SHA1.h"
-#include "internal/Hacl_Hash_Blake2.h"
+#include "internal/Hacl_Hash_Blake2s.h"
+#include "internal/Hacl_Hash_Blake2b.h"
+#include "internal/Hacl_HMAC.h"
 #include "internal/EverCrypt_Hash.h"
 #include "../EverCrypt_HMAC.h"
 
diff --git a/include/msvc/internal/EverCrypt_Hash.h b/include/msvc/internal/EverCrypt_Hash.h
index c9417677..cd706161 100644
--- a/include/msvc/internal/EverCrypt_Hash.h
+++ b/include/msvc/internal/EverCrypt_Hash.h
@@ -41,11 +41,15 @@ extern "C" {
 #include "internal/Hacl_Hash_SHA2.h"
 #include "internal/Hacl_Hash_SHA1.h"
 #include "internal/Hacl_Hash_MD5.h"
+#include "internal/Hacl_Hash_Blake2s_Simd128.h"
+#include "internal/Hacl_Hash_Blake2s.h"
+#include "internal/Hacl_Hash_Blake2b_Simd256.h"
+#include "internal/Hacl_Hash_Blake2b.h"
 #include "../EverCrypt_Hash.h"
 
 void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n);
 
-void EverCrypt_Hash_Incremental_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst);
+void EverCrypt_Hash_Incremental_hash_256(uint8_t *output, uint8_t *input, uint32_t input_len);
 
 #if defined(__cplusplus)
 }
diff --git a/include/msvc/internal/Hacl_Bignum25519_51.h b/include/msvc/internal/Hacl_Bignum25519_51.h
index 25a10503..4678f8a0 100644
--- a/include/msvc/internal/Hacl_Bignum25519_51.h
+++ b/include/msvc/internal/Hacl_Bignum25519_51.h
@@ -69,11 +69,11 @@ static inline void Hacl_Impl_Curve25519_Field51_fsub(uint64_t *out, uint64_t *f1
   uint64_t f23 = f2[3U];
   uint64_t f14 = f1[4U];
   uint64_t f24 = f2[4U];
-  out[0U] = f10 + (uint64_t)0x3fffffffffff68U - f20;
-  out[1U] = f11 + (uint64_t)0x3ffffffffffff8U - f21;
-  out[2U] = f12 + (uint64_t)0x3ffffffffffff8U - f22;
-  out[3U] = f13 + (uint64_t)0x3ffffffffffff8U - f23;
-  out[4U] = f14 + (uint64_t)0x3ffffffffffff8U - f24;
+  out[0U] = f10 + 0x3fffffffffff68ULL - f20;
+  out[1U] = f11 + 0x3ffffffffffff8ULL - f21;
+  out[2U] = f12 + 0x3ffffffffffff8ULL - f22;
+  out[3U] = f13 + 0x3ffffffffffff8ULL - f23;
+  out[4U] = f14 + 0x3ffffffffffff8ULL - f24;
 }
 
 static inline void
@@ -84,7 +84,7 @@ Hacl_Impl_Curve25519_Field51_fmul(
   FStar_UInt128_uint128 *uu___
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f10 = f1[0U];
   uint64_t f11 = f1[1U];
   uint64_t f12 = f1[2U];
@@ -95,10 +95,10 @@ Hacl_Impl_Curve25519_Field51_fmul(
   uint64_t f22 = f2[2U];
   uint64_t f23 = f2[3U];
   uint64_t f24 = f2[4U];
-  uint64_t tmp1 = f21 * (uint64_t)19U;
-  uint64_t tmp2 = f22 * (uint64_t)19U;
-  uint64_t tmp3 = f23 * (uint64_t)19U;
-  uint64_t tmp4 = f24 * (uint64_t)19U;
+  uint64_t tmp1 = f21 * 19ULL;
+  uint64_t tmp2 = f22 * 19ULL;
+  uint64_t tmp3 = f23 * 19ULL;
+  uint64_t tmp4 = f24 * 19ULL;
   FStar_UInt128_uint128 o00 = FStar_UInt128_mul_wide(f10, f20);
   FStar_UInt128_uint128 o10 = FStar_UInt128_mul_wide(f10, f21);
   FStar_UInt128_uint128 o20 = FStar_UInt128_mul_wide(f10, f22);
@@ -129,25 +129,24 @@ Hacl_Impl_Curve25519_Field51_fmul(
   FStar_UInt128_uint128 tmp_w2 = o24;
   FStar_UInt128_uint128 tmp_w3 = o34;
   FStar_UInt128_uint128 tmp_w4 = o44;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp01 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp01 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w1, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp11 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp11 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w2, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp21 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp21 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w3, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp31 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp31 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w4, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp41 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp01 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp41 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp01 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t o0 = tmp0_;
   uint64_t o1 = tmp11 + c5;
   uint64_t o2 = tmp21;
@@ -168,7 +167,7 @@ Hacl_Impl_Curve25519_Field51_fmul2(
   FStar_UInt128_uint128 *uu___
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f10 = f1[0U];
   uint64_t f11 = f1[1U];
   uint64_t f12 = f1[2U];
@@ -189,14 +188,14 @@ Hacl_Impl_Curve25519_Field51_fmul2(
   uint64_t f42 = f2[7U];
   uint64_t f43 = f2[8U];
   uint64_t f44 = f2[9U];
-  uint64_t tmp11 = f21 * (uint64_t)19U;
-  uint64_t tmp12 = f22 * (uint64_t)19U;
-  uint64_t tmp13 = f23 * (uint64_t)19U;
-  uint64_t tmp14 = f24 * (uint64_t)19U;
-  uint64_t tmp21 = f41 * (uint64_t)19U;
-  uint64_t tmp22 = f42 * (uint64_t)19U;
-  uint64_t tmp23 = f43 * (uint64_t)19U;
-  uint64_t tmp24 = f44 * (uint64_t)19U;
+  uint64_t tmp11 = f21 * 19ULL;
+  uint64_t tmp12 = f22 * 19ULL;
+  uint64_t tmp13 = f23 * 19ULL;
+  uint64_t tmp14 = f24 * 19ULL;
+  uint64_t tmp21 = f41 * 19ULL;
+  uint64_t tmp22 = f42 * 19ULL;
+  uint64_t tmp23 = f43 * 19ULL;
+  uint64_t tmp24 = f44 * 19ULL;
   FStar_UInt128_uint128 o00 = FStar_UInt128_mul_wide(f10, f20);
   FStar_UInt128_uint128 o15 = FStar_UInt128_mul_wide(f10, f21);
   FStar_UInt128_uint128 o25 = FStar_UInt128_mul_wide(f10, f22);
@@ -257,49 +256,47 @@ Hacl_Impl_Curve25519_Field51_fmul2(
   FStar_UInt128_uint128 tmp_w22 = o241;
   FStar_UInt128_uint128 tmp_w23 = o34;
   FStar_UInt128_uint128 tmp_w24 = o44;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(tmp_w10, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(tmp_w10, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w11, FStar_UInt128_uint64_to_uint128(c00));
-  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w12, FStar_UInt128_uint64_to_uint128(c10));
-  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w13, FStar_UInt128_uint64_to_uint128(c20));
-  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w14, FStar_UInt128_uint64_to_uint128(c30));
-  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp00 + c40 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c50 = l_4 >> (uint32_t)51U;
+  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp00 + c40 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c50 = l_4 >> 51U;
   uint64_t o100 = tmp0_;
   uint64_t o112 = tmp10 + c50;
   uint64_t o122 = tmp20;
   uint64_t o132 = tmp30;
   uint64_t o142 = tmp40;
-  FStar_UInt128_uint128
-  l_5 = FStar_UInt128_add(tmp_w20, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, (uint32_t)51U));
+  FStar_UInt128_uint128 l_5 = FStar_UInt128_add(tmp_w20, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, 51U));
   FStar_UInt128_uint128 l_6 = FStar_UInt128_add(tmp_w21, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, 51U));
   FStar_UInt128_uint128 l_7 = FStar_UInt128_add(tmp_w22, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, 51U));
   FStar_UInt128_uint128 l_8 = FStar_UInt128_add(tmp_w23, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, 51U));
   FStar_UInt128_uint128 l_9 = FStar_UInt128_add(tmp_w24, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, (uint32_t)51U));
-  uint64_t l_10 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_0 = l_10 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_10 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, 51U));
+  uint64_t l_10 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_0 = l_10 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_10 >> 51U;
   uint64_t o200 = tmp0_0;
   uint64_t o212 = tmp1 + c5;
   uint64_t o222 = tmp2;
@@ -339,25 +336,24 @@ static inline void Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f
   FStar_UInt128_uint128 tmp_w2 = FStar_UInt128_mul_wide(f2, f12);
   FStar_UInt128_uint128 tmp_w3 = FStar_UInt128_mul_wide(f2, f13);
   FStar_UInt128_uint128 tmp_w4 = FStar_UInt128_mul_wide(f2, f14);
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(tmp_w0, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(tmp_w1, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(tmp_w2, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(tmp_w3, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(tmp_w4, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t o0 = tmp0_;
   uint64_t o1 = tmp1 + c5;
   uint64_t o2 = tmp2;
@@ -373,18 +369,18 @@ static inline void Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f
 static inline void
 Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___)
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f0 = f[0U];
   uint64_t f1 = f[1U];
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  uint64_t d0 = (uint64_t)2U * f0;
-  uint64_t d1 = (uint64_t)2U * f1;
-  uint64_t d2 = (uint64_t)38U * f2;
-  uint64_t d3 = (uint64_t)19U * f3;
-  uint64_t d419 = (uint64_t)19U * f4;
-  uint64_t d4 = (uint64_t)2U * d419;
+  uint64_t d0 = 2ULL * f0;
+  uint64_t d1 = 2ULL * f1;
+  uint64_t d2 = 38ULL * f2;
+  uint64_t d3 = 19ULL * f3;
+  uint64_t d419 = 19ULL * f4;
+  uint64_t d4 = 2ULL * d419;
   FStar_UInt128_uint128
   s0 =
     FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f0, f0),
@@ -415,25 +411,24 @@ Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint
   FStar_UInt128_uint128 o20 = s2;
   FStar_UInt128_uint128 o30 = s3;
   FStar_UInt128_uint128 o40 = s4;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(o00, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(o00, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(o10, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(o20, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(o30, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(o40, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t o0 = tmp0_;
   uint64_t o1 = tmp1 + c5;
   uint64_t o2 = tmp2;
@@ -449,7 +444,7 @@ Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint
 static inline void
 Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___)
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint64_t f10 = f[0U];
   uint64_t f11 = f[1U];
   uint64_t f12 = f[2U];
@@ -460,12 +455,12 @@ Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uin
   uint64_t f22 = f[7U];
   uint64_t f23 = f[8U];
   uint64_t f24 = f[9U];
-  uint64_t d00 = (uint64_t)2U * f10;
-  uint64_t d10 = (uint64_t)2U * f11;
-  uint64_t d20 = (uint64_t)38U * f12;
-  uint64_t d30 = (uint64_t)19U * f13;
-  uint64_t d4190 = (uint64_t)19U * f14;
-  uint64_t d40 = (uint64_t)2U * d4190;
+  uint64_t d00 = 2ULL * f10;
+  uint64_t d10 = 2ULL * f11;
+  uint64_t d20 = 38ULL * f12;
+  uint64_t d30 = 19ULL * f13;
+  uint64_t d4190 = 19ULL * f14;
+  uint64_t d40 = 2ULL * d4190;
   FStar_UInt128_uint128
   s00 =
     FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f10, f10),
@@ -496,12 +491,12 @@ Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uin
   FStar_UInt128_uint128 o120 = s20;
   FStar_UInt128_uint128 o130 = s30;
   FStar_UInt128_uint128 o140 = s40;
-  uint64_t d0 = (uint64_t)2U * f20;
-  uint64_t d1 = (uint64_t)2U * f21;
-  uint64_t d2 = (uint64_t)38U * f22;
-  uint64_t d3 = (uint64_t)19U * f23;
-  uint64_t d419 = (uint64_t)19U * f24;
-  uint64_t d4 = (uint64_t)2U * d419;
+  uint64_t d0 = 2ULL * f20;
+  uint64_t d1 = 2ULL * f21;
+  uint64_t d2 = 38ULL * f22;
+  uint64_t d3 = 19ULL * f23;
+  uint64_t d419 = 19ULL * f24;
+  uint64_t d4 = 2ULL * d419;
   FStar_UInt128_uint128
   s0 =
     FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_mul_wide(f20, f20),
@@ -532,49 +527,47 @@ Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uin
   FStar_UInt128_uint128 o220 = s2;
   FStar_UInt128_uint128 o230 = s3;
   FStar_UInt128_uint128 o240 = s4;
-  FStar_UInt128_uint128
-  l_ = FStar_UInt128_add(o100, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, (uint32_t)51U));
+  FStar_UInt128_uint128 l_ = FStar_UInt128_add(o100, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp00 = FStar_UInt128_uint128_to_uint64(l_) & 0x7ffffffffffffULL;
+  uint64_t c00 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_, 51U));
   FStar_UInt128_uint128 l_0 = FStar_UInt128_add(o110, FStar_UInt128_uint64_to_uint128(c00));
-  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, (uint32_t)51U));
+  uint64_t tmp10 = FStar_UInt128_uint128_to_uint64(l_0) & 0x7ffffffffffffULL;
+  uint64_t c10 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_0, 51U));
   FStar_UInt128_uint128 l_1 = FStar_UInt128_add(o120, FStar_UInt128_uint64_to_uint128(c10));
-  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, (uint32_t)51U));
+  uint64_t tmp20 = FStar_UInt128_uint128_to_uint64(l_1) & 0x7ffffffffffffULL;
+  uint64_t c20 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_1, 51U));
   FStar_UInt128_uint128 l_2 = FStar_UInt128_add(o130, FStar_UInt128_uint64_to_uint128(c20));
-  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, (uint32_t)51U));
+  uint64_t tmp30 = FStar_UInt128_uint128_to_uint64(l_2) & 0x7ffffffffffffULL;
+  uint64_t c30 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_2, 51U));
   FStar_UInt128_uint128 l_3 = FStar_UInt128_add(o140, FStar_UInt128_uint64_to_uint128(c30));
-  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, (uint32_t)51U));
-  uint64_t l_4 = tmp00 + c40 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c50 = l_4 >> (uint32_t)51U;
+  uint64_t tmp40 = FStar_UInt128_uint128_to_uint64(l_3) & 0x7ffffffffffffULL;
+  uint64_t c40 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_3, 51U));
+  uint64_t l_4 = tmp00 + c40 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c50 = l_4 >> 51U;
   uint64_t o101 = tmp0_;
   uint64_t o111 = tmp10 + c50;
   uint64_t o121 = tmp20;
   uint64_t o131 = tmp30;
   uint64_t o141 = tmp40;
-  FStar_UInt128_uint128
-  l_5 = FStar_UInt128_add(o200, FStar_UInt128_uint64_to_uint128((uint64_t)0U));
-  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, (uint32_t)51U));
+  FStar_UInt128_uint128 l_5 = FStar_UInt128_add(o200, FStar_UInt128_uint64_to_uint128(0ULL));
+  uint64_t tmp0 = FStar_UInt128_uint128_to_uint64(l_5) & 0x7ffffffffffffULL;
+  uint64_t c0 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_5, 51U));
   FStar_UInt128_uint128 l_6 = FStar_UInt128_add(o210, FStar_UInt128_uint64_to_uint128(c0));
-  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, (uint32_t)51U));
+  uint64_t tmp1 = FStar_UInt128_uint128_to_uint64(l_6) & 0x7ffffffffffffULL;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_6, 51U));
   FStar_UInt128_uint128 l_7 = FStar_UInt128_add(o220, FStar_UInt128_uint64_to_uint128(c1));
-  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, (uint32_t)51U));
+  uint64_t tmp2 = FStar_UInt128_uint128_to_uint64(l_7) & 0x7ffffffffffffULL;
+  uint64_t c2 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_7, 51U));
   FStar_UInt128_uint128 l_8 = FStar_UInt128_add(o230, FStar_UInt128_uint64_to_uint128(c2));
-  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, (uint32_t)51U));
+  uint64_t tmp3 = FStar_UInt128_uint128_to_uint64(l_8) & 0x7ffffffffffffULL;
+  uint64_t c3 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_8, 51U));
   FStar_UInt128_uint128 l_9 = FStar_UInt128_add(o240, FStar_UInt128_uint64_to_uint128(c3));
-  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, (uint32_t)51U));
-  uint64_t l_10 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_0 = l_10 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_10 >> (uint32_t)51U;
+  uint64_t tmp4 = FStar_UInt128_uint128_to_uint64(l_9) & 0x7ffffffffffffULL;
+  uint64_t c4 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(l_9, 51U));
+  uint64_t l_10 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_0 = l_10 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_10 >> 51U;
   uint64_t o201 = tmp0_0;
   uint64_t o211 = tmp1 + c5;
   uint64_t o221 = tmp2;
@@ -609,49 +602,49 @@ static inline void Hacl_Impl_Curve25519_Field51_store_felem(uint64_t *u64s, uint
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  uint64_t l_ = f0 + (uint64_t)0U;
-  uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = l_ >> (uint32_t)51U;
+  uint64_t l_ = f0 + 0ULL;
+  uint64_t tmp0 = l_ & 0x7ffffffffffffULL;
+  uint64_t c0 = l_ >> 51U;
   uint64_t l_0 = f1 + c0;
-  uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = l_0 >> (uint32_t)51U;
+  uint64_t tmp1 = l_0 & 0x7ffffffffffffULL;
+  uint64_t c1 = l_0 >> 51U;
   uint64_t l_1 = f2 + c1;
-  uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = l_1 >> (uint32_t)51U;
+  uint64_t tmp2 = l_1 & 0x7ffffffffffffULL;
+  uint64_t c2 = l_1 >> 51U;
   uint64_t l_2 = f3 + c2;
-  uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = l_2 >> (uint32_t)51U;
+  uint64_t tmp3 = l_2 & 0x7ffffffffffffULL;
+  uint64_t c3 = l_2 >> 51U;
   uint64_t l_3 = f4 + c3;
-  uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = l_3 >> (uint32_t)51U;
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = l_3 & 0x7ffffffffffffULL;
+  uint64_t c4 = l_3 >> 51U;
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t f01 = tmp0_;
   uint64_t f11 = tmp1 + c5;
   uint64_t f21 = tmp2;
   uint64_t f31 = tmp3;
   uint64_t f41 = tmp4;
-  uint64_t m0 = FStar_UInt64_gte_mask(f01, (uint64_t)0x7ffffffffffedU);
-  uint64_t m1 = FStar_UInt64_eq_mask(f11, (uint64_t)0x7ffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(f21, (uint64_t)0x7ffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(f31, (uint64_t)0x7ffffffffffffU);
-  uint64_t m4 = FStar_UInt64_eq_mask(f41, (uint64_t)0x7ffffffffffffU);
+  uint64_t m0 = FStar_UInt64_gte_mask(f01, 0x7ffffffffffedULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(f11, 0x7ffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(f21, 0x7ffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(f31, 0x7ffffffffffffULL);
+  uint64_t m4 = FStar_UInt64_eq_mask(f41, 0x7ffffffffffffULL);
   uint64_t mask = (((m0 & m1) & m2) & m3) & m4;
-  uint64_t f0_ = f01 - (mask & (uint64_t)0x7ffffffffffedU);
-  uint64_t f1_ = f11 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f2_ = f21 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f3_ = f31 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f4_ = f41 - (mask & (uint64_t)0x7ffffffffffffU);
+  uint64_t f0_ = f01 - (mask & 0x7ffffffffffedULL);
+  uint64_t f1_ = f11 - (mask & 0x7ffffffffffffULL);
+  uint64_t f2_ = f21 - (mask & 0x7ffffffffffffULL);
+  uint64_t f3_ = f31 - (mask & 0x7ffffffffffffULL);
+  uint64_t f4_ = f41 - (mask & 0x7ffffffffffffULL);
   uint64_t f02 = f0_;
   uint64_t f12 = f1_;
   uint64_t f22 = f2_;
   uint64_t f32 = f3_;
   uint64_t f42 = f4_;
-  uint64_t o00 = f02 | f12 << (uint32_t)51U;
-  uint64_t o10 = f12 >> (uint32_t)13U | f22 << (uint32_t)38U;
-  uint64_t o20 = f22 >> (uint32_t)26U | f32 << (uint32_t)25U;
-  uint64_t o30 = f32 >> (uint32_t)39U | f42 << (uint32_t)12U;
+  uint64_t o00 = f02 | f12 << 51U;
+  uint64_t o10 = f12 >> 13U | f22 << 38U;
+  uint64_t o20 = f22 >> 26U | f32 << 25U;
+  uint64_t o30 = f32 >> 39U | f42 << 12U;
   uint64_t o0 = o00;
   uint64_t o1 = o10;
   uint64_t o2 = o20;
@@ -665,11 +658,11 @@ static inline void Hacl_Impl_Curve25519_Field51_store_felem(uint64_t *u64s, uint
 static inline void
 Hacl_Impl_Curve25519_Field51_cswap2(uint64_t bit, uint64_t *p1, uint64_t *p2)
 {
-  uint64_t mask = (uint64_t)0U - bit;
+  uint64_t mask = 0ULL - bit;
   KRML_MAYBE_FOR10(i,
-    (uint32_t)0U,
-    (uint32_t)10U,
-    (uint32_t)1U,
+    0U,
+    10U,
+    1U,
     uint64_t dummy = mask & (p1[i] ^ p2[i]);
     p1[i] = p1[i] ^ dummy;
     p2[i] = p2[i] ^ dummy;);
diff --git a/include/msvc/internal/Hacl_Bignum_Base.h b/include/msvc/internal/Hacl_Bignum_Base.h
index e4d35fe9..bafd4896 100644
--- a/include/msvc/internal/Hacl_Bignum_Base.h
+++ b/include/msvc/internal/Hacl_Bignum_Base.h
@@ -45,7 +45,7 @@ Hacl_Bignum_Base_mul_wide_add2_u32(uint32_t a, uint32_t b, uint32_t c_in, uint32
   uint32_t out0 = out[0U];
   uint64_t res = (uint64_t)a * (uint64_t)b + (uint64_t)c_in + (uint64_t)out0;
   out[0U] = (uint32_t)res;
-  return (uint32_t)(res >> (uint32_t)32U);
+  return (uint32_t)(res >> 32U);
 }
 
 static inline uint64_t
@@ -58,22 +58,22 @@ Hacl_Bignum_Base_mul_wide_add2_u64(uint64_t a, uint64_t b, uint64_t c_in, uint64
         FStar_UInt128_uint64_to_uint128(c_in)),
       FStar_UInt128_uint64_to_uint128(out0));
   out[0U] = FStar_UInt128_uint128_to_uint64(res);
-  return FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U));
+  return FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U));
 }
 
 static inline void
 Hacl_Bignum_Convert_bn_from_bytes_be_uint64(uint32_t len, uint8_t *b, uint64_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -82,24 +82,24 @@ Hacl_Bignum_Convert_bn_from_bytes_be_uint64(uint32_t len, uint8_t *b, uint64_t *
 static inline void
 Hacl_Bignum_Convert_bn_to_bytes_be_uint64(uint32_t len, uint64_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store64_be(tmp + i * (uint32_t)8U, b[bnLen - i - (uint32_t)1U]);
+    store64_be(tmp + i * 8U, b[bnLen - i - 1U]);
   }
   memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t));
 }
 
 static inline uint32_t Hacl_Bignum_Lib_bn_get_top_index_u32(uint32_t len, uint32_t *b)
 {
-  uint32_t priv = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t priv = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
-    uint32_t mask = FStar_UInt32_eq_mask(b[i], (uint32_t)0U);
+    uint32_t mask = FStar_UInt32_eq_mask(b[i], 0U);
     priv = (mask & priv) | (~mask & i);
   }
   return priv;
@@ -107,10 +107,10 @@ static inline uint32_t Hacl_Bignum_Lib_bn_get_top_index_u32(uint32_t len, uint32
 
 static inline uint64_t Hacl_Bignum_Lib_bn_get_top_index_u64(uint32_t len, uint64_t *b)
 {
-  uint64_t priv = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t priv = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
-    uint64_t mask = FStar_UInt64_eq_mask(b[i], (uint64_t)0U);
+    uint64_t mask = FStar_UInt64_eq_mask(b[i], 0ULL);
     priv = (mask & priv) | (~mask & (uint64_t)i);
   }
   return priv;
@@ -119,63 +119,63 @@ static inline uint64_t Hacl_Bignum_Lib_bn_get_top_index_u64(uint32_t len, uint64
 static inline uint32_t
 Hacl_Bignum_Lib_bn_get_bits_u32(uint32_t len, uint32_t *b, uint32_t i, uint32_t l)
 {
-  uint32_t i1 = i / (uint32_t)32U;
-  uint32_t j = i % (uint32_t)32U;
+  uint32_t i1 = i / 32U;
+  uint32_t j = i % 32U;
   uint32_t p1 = b[i1] >> j;
   uint32_t ite;
-  if (i1 + (uint32_t)1U < len && (uint32_t)0U < j)
+  if (i1 + 1U < len && 0U < j)
   {
-    ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)32U - j);
+    ite = p1 | b[i1 + 1U] << (32U - j);
   }
   else
   {
     ite = p1;
   }
-  return ite & (((uint32_t)1U << l) - (uint32_t)1U);
+  return ite & ((1U << l) - 1U);
 }
 
 static inline uint64_t
 Hacl_Bignum_Lib_bn_get_bits_u64(uint32_t len, uint64_t *b, uint32_t i, uint32_t l)
 {
-  uint32_t i1 = i / (uint32_t)64U;
-  uint32_t j = i % (uint32_t)64U;
+  uint32_t i1 = i / 64U;
+  uint32_t j = i % 64U;
   uint64_t p1 = b[i1] >> j;
   uint64_t ite;
-  if (i1 + (uint32_t)1U < len && (uint32_t)0U < j)
+  if (i1 + 1U < len && 0U < j)
   {
-    ite = p1 | b[i1 + (uint32_t)1U] << ((uint32_t)64U - j);
+    ite = p1 | b[i1 + 1U] << (64U - j);
   }
   else
   {
     ite = p1;
   }
-  return ite & (((uint64_t)1U << l) - (uint64_t)1U);
+  return ite & ((1ULL << l) - 1ULL);
 }
 
 static inline uint32_t
 Hacl_Bignum_Addition_bn_sub_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -188,27 +188,27 @@ Hacl_Bignum_Addition_bn_sub_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b,
 static inline uint64_t
 Hacl_Bignum_Addition_bn_sub_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -221,27 +221,27 @@ Hacl_Bignum_Addition_bn_sub_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b,
 static inline uint32_t
 Hacl_Bignum_Addition_bn_add_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -254,27 +254,27 @@ Hacl_Bignum_Addition_bn_add_eq_len_u32(uint32_t aLen, uint32_t *a, uint32_t *b,
 static inline uint64_t
 Hacl_Bignum_Addition_bn_add_eq_len_u64(uint32_t aLen, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < aLen / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+  for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -294,27 +294,27 @@ Hacl_Bignum_Multiplication_bn_mul_u32(
 )
 {
   memset(res, 0U, (aLen + bLen) * sizeof (uint32_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bLen; i0++)
+  for (uint32_t i0 = 0U; i0 < bLen; i0++)
   {
     uint32_t bj = b[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < aLen / 4U; i++)
     {
-      uint32_t a_i = a[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = a[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c, res_i0);
-      uint32_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = a[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c, res_i1);
-      uint32_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = a[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c, res_i2);
-      uint32_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = a[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c, res_i);
     }
-    for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+    for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
     {
       uint32_t a_i = a[i];
       uint32_t *res_i = res_j + i;
@@ -335,27 +335,27 @@ Hacl_Bignum_Multiplication_bn_mul_u64(
 )
 {
   memset(res, 0U, (aLen + bLen) * sizeof (uint64_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bLen; i0++)
+  for (uint32_t i0 = 0U; i0 < bLen; i0++)
   {
     uint64_t bj = b[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < aLen / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < aLen / 4U; i++)
     {
-      uint64_t a_i = a[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = a[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
-    for (uint32_t i = aLen / (uint32_t)4U * (uint32_t)4U; i < aLen; i++)
+    for (uint32_t i = aLen / 4U * 4U; i < aLen; i++)
     {
       uint64_t a_i = a[i];
       uint64_t *res_i = res_j + i;
@@ -370,28 +370,28 @@ static inline void
 Hacl_Bignum_Multiplication_bn_sqr_u32(uint32_t aLen, uint32_t *a, uint32_t *res)
 {
   memset(res, 0U, (aLen + aLen) * sizeof (uint32_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < aLen; i0++)
+  for (uint32_t i0 = 0U; i0 < aLen; i0++)
   {
     uint32_t *ab = a;
     uint32_t a_j = a[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint32_t a_i = ab[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = ab[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i0);
-      uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = ab[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c, res_i1);
-      uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = ab[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c, res_i2);
-      uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = ab[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint32_t a_i = ab[i];
       uint32_t *res_i = res_j + i;
@@ -401,48 +401,48 @@ Hacl_Bignum_Multiplication_bn_sqr_u32(uint32_t aLen, uint32_t *a, uint32_t *res)
     res[i0 + i0] = r;
   }
   uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  KRML_MAYBE_UNUSED_VAR(c0);
   KRML_CHECK_SIZE(sizeof (uint32_t), aLen + aLen);
   uint32_t *tmp = (uint32_t *)alloca((aLen + aLen) * sizeof (uint32_t));
   memset(tmp, 0U, (aLen + aLen) * sizeof (uint32_t));
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     uint64_t res1 = (uint64_t)a[i] * (uint64_t)a[i];
-    uint32_t hi = (uint32_t)(res1 >> (uint32_t)32U);
+    uint32_t hi = (uint32_t)(res1 >> 32U);
     uint32_t lo = (uint32_t)res1;
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;
   }
   uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen + aLen, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void
 Hacl_Bignum_Multiplication_bn_sqr_u64(uint32_t aLen, uint64_t *a, uint64_t *res)
 {
   memset(res, 0U, (aLen + aLen) * sizeof (uint64_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < aLen; i0++)
+  for (uint32_t i0 = 0U; i0 < aLen; i0++)
   {
     uint64_t *ab = a;
     uint64_t a_j = a[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -452,20 +452,20 @@ Hacl_Bignum_Multiplication_bn_sqr_u64(uint32_t aLen, uint64_t *a, uint64_t *res)
     res[i0 + i0] = r;
   }
   uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  KRML_MAYBE_UNUSED_VAR(c0);
   KRML_CHECK_SIZE(sizeof (uint64_t), aLen + aLen);
   uint64_t *tmp = (uint64_t *)alloca((aLen + aLen) * sizeof (uint64_t));
   memset(tmp, 0U, (aLen + aLen) * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;
   }
   uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen + aLen, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 #if defined(__cplusplus)
diff --git a/include/msvc/internal/Hacl_Bignum_K256.h b/include/msvc/internal/Hacl_Bignum_K256.h
index 59aff176..fe72fffe 100644
--- a/include/msvc/internal/Hacl_Bignum_K256.h
+++ b/include/msvc/internal/Hacl_Bignum_K256.h
@@ -45,13 +45,7 @@ static inline bool Hacl_K256_Field_is_felem_zero_vartime(uint64_t *f)
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  return
-    f0
-    == (uint64_t)0U
-    && f1 == (uint64_t)0U
-    && f2 == (uint64_t)0U
-    && f3 == (uint64_t)0U
-    && f4 == (uint64_t)0U;
+  return f0 == 0ULL && f1 == 0ULL && f2 == 0ULL && f3 == 0ULL && f4 == 0ULL;
 }
 
 static inline bool Hacl_K256_Field_is_felem_eq_vartime(uint64_t *f1, uint64_t *f2)
@@ -76,42 +70,42 @@ static inline bool Hacl_K256_Field_is_felem_lt_prime_minus_order_vartime(uint64_
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
   uint64_t f4 = f[4U];
-  if (f4 > (uint64_t)0U)
+  if (f4 > 0ULL)
   {
     return false;
   }
-  if (f3 > (uint64_t)0U)
+  if (f3 > 0ULL)
   {
     return false;
   }
-  if (f2 < (uint64_t)0x1455123U)
+  if (f2 < 0x1455123ULL)
   {
     return true;
   }
-  if (f2 > (uint64_t)0x1455123U)
+  if (f2 > 0x1455123ULL)
   {
     return false;
   }
-  if (f1 < (uint64_t)0x1950b75fc4402U)
+  if (f1 < 0x1950b75fc4402ULL)
   {
     return true;
   }
-  if (f1 > (uint64_t)0x1950b75fc4402U)
+  if (f1 > 0x1950b75fc4402ULL)
   {
     return false;
   }
-  return f0 < (uint64_t)0xda1722fc9baeeU;
+  return f0 < 0xda1722fc9baeeULL;
 }
 
 static inline void Hacl_K256_Field_load_felem(uint64_t *f, uint8_t *b)
 {
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = tmp;
-    uint8_t *bj = b + i * (uint32_t)8U;
+    uint8_t *bj = b + i * 8U;
     uint64_t u = load64_be(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -120,11 +114,11 @@ static inline void Hacl_K256_Field_load_felem(uint64_t *f, uint8_t *b)
   uint64_t s1 = tmp[2U];
   uint64_t s2 = tmp[1U];
   uint64_t s3 = tmp[0U];
-  uint64_t f00 = s0 & (uint64_t)0xfffffffffffffU;
-  uint64_t f10 = s0 >> (uint32_t)52U | (s1 & (uint64_t)0xffffffffffU) << (uint32_t)12U;
-  uint64_t f20 = s1 >> (uint32_t)40U | (s2 & (uint64_t)0xfffffffU) << (uint32_t)24U;
-  uint64_t f30 = s2 >> (uint32_t)28U | (s3 & (uint64_t)0xffffU) << (uint32_t)36U;
-  uint64_t f40 = s3 >> (uint32_t)16U;
+  uint64_t f00 = s0 & 0xfffffffffffffULL;
+  uint64_t f10 = s0 >> 52U | (s1 & 0xffffffffffULL) << 12U;
+  uint64_t f20 = s1 >> 40U | (s2 & 0xfffffffULL) << 24U;
+  uint64_t f30 = s2 >> 28U | (s3 & 0xffffULL) << 36U;
+  uint64_t f40 = s3 >> 16U;
   uint64_t f0 = f00;
   uint64_t f1 = f10;
   uint64_t f2 = f20;
@@ -148,11 +142,11 @@ static inline bool Hacl_K256_Field_load_felem_lt_prime_vartime(uint64_t *f, uint
   bool
   is_ge_p =
     f0
-    >= (uint64_t)0xffffefffffc2fU
-    && f1 == (uint64_t)0xfffffffffffffU
-    && f2 == (uint64_t)0xfffffffffffffU
-    && f3 == (uint64_t)0xfffffffffffffU
-    && f4 == (uint64_t)0xffffffffffffU;
+    >= 0xffffefffffc2fULL
+    && f1 == 0xfffffffffffffULL
+    && f2 == 0xfffffffffffffULL
+    && f3 == 0xfffffffffffffULL
+    && f4 == 0xffffffffffffULL;
   return !is_ge_p;
 }
 
@@ -164,10 +158,10 @@ static inline void Hacl_K256_Field_store_felem(uint8_t *b, uint64_t *f)
   uint64_t f20 = f[2U];
   uint64_t f30 = f[3U];
   uint64_t f4 = f[4U];
-  uint64_t o0 = f00 | f10 << (uint32_t)52U;
-  uint64_t o1 = f10 >> (uint32_t)12U | f20 << (uint32_t)40U;
-  uint64_t o2 = f20 >> (uint32_t)24U | f30 << (uint32_t)28U;
-  uint64_t o3 = f30 >> (uint32_t)36U | f4 << (uint32_t)16U;
+  uint64_t o0 = f00 | f10 << 52U;
+  uint64_t o1 = f10 >> 12U | f20 << 40U;
+  uint64_t o2 = f20 >> 24U | f30 << 28U;
+  uint64_t o3 = f30 >> 36U | f4 << 16U;
   uint64_t f0 = o0;
   uint64_t f1 = o1;
   uint64_t f2 = o2;
@@ -176,11 +170,7 @@ static inline void Hacl_K256_Field_store_felem(uint8_t *b, uint64_t *f)
   tmp[1U] = f2;
   tmp[2U] = f1;
   tmp[3U] = f0;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(b + i * (uint32_t)8U, tmp[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(b + i * 8U, tmp[i]););
 }
 
 static inline void Hacl_K256_Field_fmul_small_num(uint64_t *out, uint64_t *f, uint64_t num)
@@ -248,11 +238,11 @@ static inline void Hacl_K256_Field_fsub(uint64_t *out, uint64_t *f1, uint64_t *f
   uint64_t b2 = f2[2U];
   uint64_t b3 = f2[3U];
   uint64_t b4 = f2[4U];
-  uint64_t r00 = (uint64_t)9007190664804446U * x - b0;
-  uint64_t r10 = (uint64_t)9007199254740990U * x - b1;
-  uint64_t r20 = (uint64_t)9007199254740990U * x - b2;
-  uint64_t r30 = (uint64_t)9007199254740990U * x - b3;
-  uint64_t r40 = (uint64_t)562949953421310U * x - b4;
+  uint64_t r00 = 9007190664804446ULL * x - b0;
+  uint64_t r10 = 9007199254740990ULL * x - b1;
+  uint64_t r20 = 9007199254740990ULL * x - b2;
+  uint64_t r30 = 9007199254740990ULL * x - b3;
+  uint64_t r40 = 562949953421310ULL * x - b4;
   uint64_t r0 = r00;
   uint64_t r1 = r10;
   uint64_t r2 = r20;
@@ -287,7 +277,7 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
   uint64_t b2 = f2[2U];
   uint64_t b3 = f2[3U];
   uint64_t b4 = f2[4U];
-  uint64_t r = (uint64_t)0x1000003D10U;
+  uint64_t r = 0x1000003D10ULL;
   FStar_UInt128_uint128
   d0 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0,
@@ -298,9 +288,9 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
   FStar_UInt128_uint128 c0 = FStar_UInt128_mul_wide(a4, b4);
   FStar_UInt128_uint128
   d1 = FStar_UInt128_add_mod(d0, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(c0)));
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, (uint32_t)64U));
-  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, (uint32_t)52U);
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, 64U));
+  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, 52U);
   FStar_UInt128_uint128
   d3 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(d2,
@@ -309,12 +299,11 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
           FStar_UInt128_mul_wide(a2, b2)),
         FStar_UInt128_mul_wide(a3, b1)),
       FStar_UInt128_mul_wide(a4, b0));
-  FStar_UInt128_uint128
-  d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << (uint32_t)12U, c1));
-  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, (uint32_t)52U);
-  uint64_t tx = t4 >> (uint32_t)48U;
-  uint64_t t4_ = t4 & (uint64_t)0xffffffffffffU;
+  FStar_UInt128_uint128 d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << 12U, c1));
+  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, 52U);
+  uint64_t tx = t4 >> 48U;
+  uint64_t t4_ = t4 & 0xffffffffffffULL;
   FStar_UInt128_uint128 c2 = FStar_UInt128_mul_wide(a0, b0);
   FStar_UInt128_uint128
   d6 =
@@ -323,13 +312,12 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
           FStar_UInt128_mul_wide(a2, b3)),
         FStar_UInt128_mul_wide(a3, b2)),
       FStar_UInt128_mul_wide(a4, b1));
-  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, (uint32_t)52U);
-  uint64_t u0_ = tx | u0 << (uint32_t)4U;
-  FStar_UInt128_uint128
-  c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> (uint32_t)4U));
-  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, (uint32_t)52U);
+  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, 52U);
+  uint64_t u0_ = tx | u0 << 4U;
+  FStar_UInt128_uint128 c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> 4U));
+  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, 52U);
   FStar_UInt128_uint128
   c5 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(c4, FStar_UInt128_mul_wide(a0, b1)),
@@ -343,10 +331,10 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
   FStar_UInt128_uint128
   c6 =
     FStar_UInt128_add_mod(c5,
-      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & (uint64_t)0xfffffffffffffU, r));
-  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, (uint32_t)52U);
-  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, (uint32_t)52U);
+      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & 0xfffffffffffffULL, r));
+  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, 52U);
+  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, 52U);
   FStar_UInt128_uint128
   c8 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(c7,
@@ -359,16 +347,15 @@ static inline void Hacl_K256_Field_fmul(uint64_t *out, uint64_t *f1, uint64_t *f
       FStar_UInt128_mul_wide(a4, b3));
   FStar_UInt128_uint128
   c9 = FStar_UInt128_add_mod(c8, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(d10)));
-  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, (uint32_t)64U));
-  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, (uint32_t)52U);
+  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, 64U));
+  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, 52U);
   FStar_UInt128_uint128
   c11 =
-    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10,
-        FStar_UInt128_mul_wide(r << (uint32_t)12U, d11)),
+    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10, FStar_UInt128_mul_wide(r << 12U, d11)),
       FStar_UInt128_uint64_to_uint128(t3));
-  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & (uint64_t)0xfffffffffffffU;
-  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, (uint32_t)52U));
+  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & 0xfffffffffffffULL;
+  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, 52U));
   uint64_t r4 = c12 + t4_;
   uint64_t f0 = r0;
   uint64_t f11 = r1;
@@ -389,43 +376,41 @@ static inline void Hacl_K256_Field_fsqr(uint64_t *out, uint64_t *f)
   uint64_t a2 = f[2U];
   uint64_t a3 = f[3U];
   uint64_t a4 = f[4U];
-  uint64_t r = (uint64_t)0x1000003D10U;
+  uint64_t r = 0x1000003D10ULL;
   FStar_UInt128_uint128
   d0 =
-    FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0 * (uint64_t)2U, a3),
-      FStar_UInt128_mul_wide(a1 * (uint64_t)2U, a2));
+    FStar_UInt128_add_mod(FStar_UInt128_mul_wide(a0 * 2ULL, a3),
+      FStar_UInt128_mul_wide(a1 * 2ULL, a2));
   FStar_UInt128_uint128 c0 = FStar_UInt128_mul_wide(a4, a4);
   FStar_UInt128_uint128
   d1 = FStar_UInt128_add_mod(d0, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(c0)));
-  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, (uint32_t)64U));
-  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, (uint32_t)52U);
-  uint64_t a41 = a4 * (uint64_t)2U;
+  uint64_t c1 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c0, 64U));
+  uint64_t t3 = FStar_UInt128_uint128_to_uint64(d1) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d2 = FStar_UInt128_shift_right(d1, 52U);
+  uint64_t a41 = a4 * 2ULL;
   FStar_UInt128_uint128
   d3 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(d2,
           FStar_UInt128_mul_wide(a0, a41)),
-        FStar_UInt128_mul_wide(a1 * (uint64_t)2U, a3)),
+        FStar_UInt128_mul_wide(a1 * 2ULL, a3)),
       FStar_UInt128_mul_wide(a2, a2));
-  FStar_UInt128_uint128
-  d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << (uint32_t)12U, c1));
-  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, (uint32_t)52U);
-  uint64_t tx = t4 >> (uint32_t)48U;
-  uint64_t t4_ = t4 & (uint64_t)0xffffffffffffU;
+  FStar_UInt128_uint128 d4 = FStar_UInt128_add_mod(d3, FStar_UInt128_mul_wide(r << 12U, c1));
+  uint64_t t4 = FStar_UInt128_uint128_to_uint64(d4) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d5 = FStar_UInt128_shift_right(d4, 52U);
+  uint64_t tx = t4 >> 48U;
+  uint64_t t4_ = t4 & 0xffffffffffffULL;
   FStar_UInt128_uint128 c2 = FStar_UInt128_mul_wide(a0, a0);
   FStar_UInt128_uint128
   d6 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(d5, FStar_UInt128_mul_wide(a1, a41)),
-      FStar_UInt128_mul_wide(a2 * (uint64_t)2U, a3));
-  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, (uint32_t)52U);
-  uint64_t u0_ = tx | u0 << (uint32_t)4U;
-  FStar_UInt128_uint128
-  c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> (uint32_t)4U));
-  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, (uint32_t)52U);
-  uint64_t a01 = a0 * (uint64_t)2U;
+      FStar_UInt128_mul_wide(a2 * 2ULL, a3));
+  uint64_t u0 = FStar_UInt128_uint128_to_uint64(d6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 d7 = FStar_UInt128_shift_right(d6, 52U);
+  uint64_t u0_ = tx | u0 << 4U;
+  FStar_UInt128_uint128 c3 = FStar_UInt128_add_mod(c2, FStar_UInt128_mul_wide(u0_, r >> 4U));
+  uint64_t r0 = FStar_UInt128_uint128_to_uint64(c3) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c4 = FStar_UInt128_shift_right(c3, 52U);
+  uint64_t a01 = a0 * 2ULL;
   FStar_UInt128_uint128 c5 = FStar_UInt128_add_mod(c4, FStar_UInt128_mul_wide(a01, a1));
   FStar_UInt128_uint128
   d8 =
@@ -434,10 +419,10 @@ static inline void Hacl_K256_Field_fsqr(uint64_t *out, uint64_t *f)
   FStar_UInt128_uint128
   c6 =
     FStar_UInt128_add_mod(c5,
-      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & (uint64_t)0xfffffffffffffU, r));
-  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, (uint32_t)52U);
-  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, (uint32_t)52U);
+      FStar_UInt128_mul_wide(FStar_UInt128_uint128_to_uint64(d8) & 0xfffffffffffffULL, r));
+  FStar_UInt128_uint128 d9 = FStar_UInt128_shift_right(d8, 52U);
+  uint64_t r1 = FStar_UInt128_uint128_to_uint64(c6) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c7 = FStar_UInt128_shift_right(c6, 52U);
   FStar_UInt128_uint128
   c8 =
     FStar_UInt128_add_mod(FStar_UInt128_add_mod(c7, FStar_UInt128_mul_wide(a01, a2)),
@@ -445,16 +430,15 @@ static inline void Hacl_K256_Field_fsqr(uint64_t *out, uint64_t *f)
   FStar_UInt128_uint128 d10 = FStar_UInt128_add_mod(d9, FStar_UInt128_mul_wide(a3, a41));
   FStar_UInt128_uint128
   c9 = FStar_UInt128_add_mod(c8, FStar_UInt128_mul_wide(r, FStar_UInt128_uint128_to_uint64(d10)));
-  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, (uint32_t)64U));
-  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & (uint64_t)0xfffffffffffffU;
-  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, (uint32_t)52U);
+  uint64_t d11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(d10, 64U));
+  uint64_t r2 = FStar_UInt128_uint128_to_uint64(c9) & 0xfffffffffffffULL;
+  FStar_UInt128_uint128 c10 = FStar_UInt128_shift_right(c9, 52U);
   FStar_UInt128_uint128
   c11 =
-    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10,
-        FStar_UInt128_mul_wide(r << (uint32_t)12U, d11)),
+    FStar_UInt128_add_mod(FStar_UInt128_add_mod(c10, FStar_UInt128_mul_wide(r << 12U, d11)),
       FStar_UInt128_uint64_to_uint128(t3));
-  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & (uint64_t)0xfffffffffffffU;
-  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, (uint32_t)52U));
+  uint64_t r3 = FStar_UInt128_uint128_to_uint64(c11) & 0xfffffffffffffULL;
+  uint64_t c12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(c11, 52U));
   uint64_t r4 = c12 + t4_;
   uint64_t f0 = r0;
   uint64_t f1 = r1;
@@ -475,23 +459,23 @@ static inline void Hacl_K256_Field_fnormalize_weak(uint64_t *out, uint64_t *f)
   uint64_t t2 = f[2U];
   uint64_t t3 = f[3U];
   uint64_t t4 = f[4U];
-  uint64_t x0 = t4 >> (uint32_t)48U;
-  uint64_t t410 = t4 & (uint64_t)0xffffffffffffU;
+  uint64_t x0 = t4 >> 48U;
+  uint64_t t410 = t4 & 0xffffffffffffULL;
   uint64_t x = x0;
   uint64_t t01 = t0;
   uint64_t t11 = t1;
   uint64_t t21 = t2;
   uint64_t t31 = t3;
   uint64_t t41 = t410;
-  uint64_t t02 = t01 + x * (uint64_t)0x1000003D1U;
-  uint64_t t12 = t11 + (t02 >> (uint32_t)52U);
-  uint64_t t03 = t02 & (uint64_t)0xfffffffffffffU;
-  uint64_t t22 = t21 + (t12 >> (uint32_t)52U);
-  uint64_t t13 = t12 & (uint64_t)0xfffffffffffffU;
-  uint64_t t32 = t31 + (t22 >> (uint32_t)52U);
-  uint64_t t23 = t22 & (uint64_t)0xfffffffffffffU;
-  uint64_t t42 = t41 + (t32 >> (uint32_t)52U);
-  uint64_t t33 = t32 & (uint64_t)0xfffffffffffffU;
+  uint64_t t02 = t01 + x * 0x1000003D1ULL;
+  uint64_t t12 = t11 + (t02 >> 52U);
+  uint64_t t03 = t02 & 0xfffffffffffffULL;
+  uint64_t t22 = t21 + (t12 >> 52U);
+  uint64_t t13 = t12 & 0xfffffffffffffULL;
+  uint64_t t32 = t31 + (t22 >> 52U);
+  uint64_t t23 = t22 & 0xfffffffffffffULL;
+  uint64_t t42 = t41 + (t32 >> 52U);
+  uint64_t t33 = t32 & 0xfffffffffffffULL;
   uint64_t f0 = t03;
   uint64_t f1 = t13;
   uint64_t f2 = t23;
@@ -511,59 +495,59 @@ static inline void Hacl_K256_Field_fnormalize(uint64_t *out, uint64_t *f)
   uint64_t f20 = f[2U];
   uint64_t f30 = f[3U];
   uint64_t f40 = f[4U];
-  uint64_t x0 = f40 >> (uint32_t)48U;
-  uint64_t t40 = f40 & (uint64_t)0xffffffffffffU;
+  uint64_t x0 = f40 >> 48U;
+  uint64_t t40 = f40 & 0xffffffffffffULL;
   uint64_t x1 = x0;
   uint64_t t00 = f00;
   uint64_t t10 = f10;
   uint64_t t20 = f20;
   uint64_t t30 = f30;
   uint64_t t42 = t40;
-  uint64_t t01 = t00 + x1 * (uint64_t)0x1000003D1U;
-  uint64_t t110 = t10 + (t01 >> (uint32_t)52U);
-  uint64_t t020 = t01 & (uint64_t)0xfffffffffffffU;
-  uint64_t t210 = t20 + (t110 >> (uint32_t)52U);
-  uint64_t t120 = t110 & (uint64_t)0xfffffffffffffU;
-  uint64_t t310 = t30 + (t210 >> (uint32_t)52U);
-  uint64_t t220 = t210 & (uint64_t)0xfffffffffffffU;
-  uint64_t t410 = t42 + (t310 >> (uint32_t)52U);
-  uint64_t t320 = t310 & (uint64_t)0xfffffffffffffU;
+  uint64_t t01 = t00 + x1 * 0x1000003D1ULL;
+  uint64_t t110 = t10 + (t01 >> 52U);
+  uint64_t t020 = t01 & 0xfffffffffffffULL;
+  uint64_t t210 = t20 + (t110 >> 52U);
+  uint64_t t120 = t110 & 0xfffffffffffffULL;
+  uint64_t t310 = t30 + (t210 >> 52U);
+  uint64_t t220 = t210 & 0xfffffffffffffULL;
+  uint64_t t410 = t42 + (t310 >> 52U);
+  uint64_t t320 = t310 & 0xfffffffffffffULL;
   uint64_t t0 = t020;
   uint64_t t1 = t120;
   uint64_t t2 = t220;
   uint64_t t3 = t320;
   uint64_t t4 = t410;
-  uint64_t x2 = t4 >> (uint32_t)48U;
-  uint64_t t411 = t4 & (uint64_t)0xffffffffffffU;
+  uint64_t x2 = t4 >> 48U;
+  uint64_t t411 = t4 & 0xffffffffffffULL;
   uint64_t x = x2;
   uint64_t r0 = t0;
   uint64_t r1 = t1;
   uint64_t r2 = t2;
   uint64_t r3 = t3;
   uint64_t r4 = t411;
-  uint64_t m4 = FStar_UInt64_eq_mask(r4, (uint64_t)0xffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(r3, (uint64_t)0xfffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(r2, (uint64_t)0xfffffffffffffU);
-  uint64_t m1 = FStar_UInt64_eq_mask(r1, (uint64_t)0xfffffffffffffU);
-  uint64_t m0 = FStar_UInt64_gte_mask(r0, (uint64_t)0xffffefffffc2fU);
+  uint64_t m4 = FStar_UInt64_eq_mask(r4, 0xffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(r3, 0xfffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(r2, 0xfffffffffffffULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(r1, 0xfffffffffffffULL);
+  uint64_t m0 = FStar_UInt64_gte_mask(r0, 0xffffefffffc2fULL);
   uint64_t is_ge_p_m = (((m0 & m1) & m2) & m3) & m4;
-  uint64_t m_to_one = is_ge_p_m & (uint64_t)1U;
+  uint64_t m_to_one = is_ge_p_m & 1ULL;
   uint64_t x10 = m_to_one | x;
-  uint64_t t010 = r0 + x10 * (uint64_t)0x1000003D1U;
-  uint64_t t11 = r1 + (t010 >> (uint32_t)52U);
-  uint64_t t02 = t010 & (uint64_t)0xfffffffffffffU;
-  uint64_t t21 = r2 + (t11 >> (uint32_t)52U);
-  uint64_t t12 = t11 & (uint64_t)0xfffffffffffffU;
-  uint64_t t31 = r3 + (t21 >> (uint32_t)52U);
-  uint64_t t22 = t21 & (uint64_t)0xfffffffffffffU;
-  uint64_t t41 = r4 + (t31 >> (uint32_t)52U);
-  uint64_t t32 = t31 & (uint64_t)0xfffffffffffffU;
+  uint64_t t010 = r0 + x10 * 0x1000003D1ULL;
+  uint64_t t11 = r1 + (t010 >> 52U);
+  uint64_t t02 = t010 & 0xfffffffffffffULL;
+  uint64_t t21 = r2 + (t11 >> 52U);
+  uint64_t t12 = t11 & 0xfffffffffffffULL;
+  uint64_t t31 = r3 + (t21 >> 52U);
+  uint64_t t22 = t21 & 0xfffffffffffffULL;
+  uint64_t t41 = r4 + (t31 >> 52U);
+  uint64_t t32 = t31 & 0xfffffffffffffULL;
   uint64_t s0 = t02;
   uint64_t s1 = t12;
   uint64_t s2 = t22;
   uint64_t s3 = t32;
   uint64_t s4 = t41;
-  uint64_t t412 = s4 & (uint64_t)0xffffffffffffU;
+  uint64_t t412 = s4 & 0xffffffffffffULL;
   uint64_t k0 = s0;
   uint64_t k1 = s1;
   uint64_t k2 = s2;
@@ -590,11 +574,11 @@ static inline void Hacl_K256_Field_fnegate_conditional_vartime(uint64_t *f, bool
     uint64_t a2 = f[2U];
     uint64_t a3 = f[3U];
     uint64_t a4 = f[4U];
-    uint64_t r0 = (uint64_t)9007190664804446U - a0;
-    uint64_t r1 = (uint64_t)9007199254740990U - a1;
-    uint64_t r2 = (uint64_t)9007199254740990U - a2;
-    uint64_t r3 = (uint64_t)9007199254740990U - a3;
-    uint64_t r4 = (uint64_t)562949953421310U - a4;
+    uint64_t r0 = 9007190664804446ULL - a0;
+    uint64_t r1 = 9007199254740990ULL - a1;
+    uint64_t r2 = 9007199254740990ULL - a2;
+    uint64_t r3 = 9007199254740990ULL - a3;
+    uint64_t r4 = 562949953421310ULL - a4;
     uint64_t f0 = r0;
     uint64_t f1 = r1;
     uint64_t f2 = r2;
@@ -612,7 +596,7 @@ static inline void Hacl_K256_Field_fnegate_conditional_vartime(uint64_t *f, bool
 
 static inline void Hacl_Impl_K256_Finv_fsquare_times_in_place(uint64_t *out, uint32_t b)
 {
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  for (uint32_t i = 0U; i < b; i++)
   {
     Hacl_K256_Field_fsqr(out, out);
   }
@@ -620,8 +604,8 @@ static inline void Hacl_Impl_K256_Finv_fsquare_times_in_place(uint64_t *out, uin
 
 static inline void Hacl_Impl_K256_Finv_fsquare_times(uint64_t *out, uint64_t *a, uint32_t b)
 {
-  memcpy(out, a, (uint32_t)5U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  memcpy(out, a, 5U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < b; i++)
   {
     Hacl_K256_Field_fsqr(out, out);
   }
@@ -633,29 +617,29 @@ static inline void Hacl_Impl_K256_Finv_fexp_223_23(uint64_t *out, uint64_t *x2,
   uint64_t x22[5U] = { 0U };
   uint64_t x44[5U] = { 0U };
   uint64_t x88[5U] = { 0U };
-  Hacl_Impl_K256_Finv_fsquare_times(x2, f, (uint32_t)1U);
+  Hacl_Impl_K256_Finv_fsquare_times(x2, f, 1U);
   Hacl_K256_Field_fmul(x2, x2, f);
-  Hacl_Impl_K256_Finv_fsquare_times(x3, x2, (uint32_t)1U);
+  Hacl_Impl_K256_Finv_fsquare_times(x3, x2, 1U);
   Hacl_K256_Field_fmul(x3, x3, f);
-  Hacl_Impl_K256_Finv_fsquare_times(out, x3, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times(out, x3, 3U);
   Hacl_K256_Field_fmul(out, out, x3);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U);
   Hacl_K256_Field_fmul(out, out, x3);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U);
   Hacl_K256_Field_fmul(out, out, x2);
-  Hacl_Impl_K256_Finv_fsquare_times(x22, out, (uint32_t)11U);
+  Hacl_Impl_K256_Finv_fsquare_times(x22, out, 11U);
   Hacl_K256_Field_fmul(x22, x22, out);
-  Hacl_Impl_K256_Finv_fsquare_times(x44, x22, (uint32_t)22U);
+  Hacl_Impl_K256_Finv_fsquare_times(x44, x22, 22U);
   Hacl_K256_Field_fmul(x44, x44, x22);
-  Hacl_Impl_K256_Finv_fsquare_times(x88, x44, (uint32_t)44U);
+  Hacl_Impl_K256_Finv_fsquare_times(x88, x44, 44U);
   Hacl_K256_Field_fmul(x88, x88, x44);
-  Hacl_Impl_K256_Finv_fsquare_times(out, x88, (uint32_t)88U);
+  Hacl_Impl_K256_Finv_fsquare_times(out, x88, 88U);
   Hacl_K256_Field_fmul(out, out, x88);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)44U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 44U);
   Hacl_K256_Field_fmul(out, out, x44);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U);
   Hacl_K256_Field_fmul(out, out, x3);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)23U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 23U);
   Hacl_K256_Field_fmul(out, out, x22);
 }
 
@@ -663,11 +647,11 @@ static inline void Hacl_Impl_K256_Finv_finv(uint64_t *out, uint64_t *f)
 {
   uint64_t x2[5U] = { 0U };
   Hacl_Impl_K256_Finv_fexp_223_23(out, x2, f);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)5U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 5U);
   Hacl_K256_Field_fmul(out, out, f);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)3U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 3U);
   Hacl_K256_Field_fmul(out, out, x2);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U);
   Hacl_K256_Field_fmul(out, out, f);
 }
 
@@ -675,9 +659,9 @@ static inline void Hacl_Impl_K256_Finv_fsqrt(uint64_t *out, uint64_t *f)
 {
   uint64_t x2[5U] = { 0U };
   Hacl_Impl_K256_Finv_fexp_223_23(out, x2, f);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)6U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 6U);
   Hacl_K256_Field_fmul(out, out, x2);
-  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, (uint32_t)2U);
+  Hacl_Impl_K256_Finv_fsquare_times_in_place(out, 2U);
 }
 
 #if defined(__cplusplus)
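The K256 hunks above change no arithmetic: they only respell integer constants, replacing cast-based forms such as (uint64_t)0xfffffffffffffU and (uint32_t)3U with the suffixed literals 0xfffffffffffffULL and 3U. The inversion and square-root ladders built from Hacl_Impl_K256_Finv_fsquare_times, Hacl_Impl_K256_Finv_fsquare_times_in_place and Hacl_K256_Field_fmul, which (as the names suggest) exponentiate modulo the secp256k1 prime p = 2^256 - 2^32 - 977, are otherwise untouched. The stand-alone C11 sketch below is not part of the generated headers; it only checks at compile time that the old and new spellings of the constants visible above denote the same values, and that the masks are the 52-bit and 48-bit limb masks of the 5-limb field representation.

/*
 * Compile-time sanity checks for the literal respelling above.  Illustrative
 * sketch only; none of this code appears in the generated headers.
 */
#include <stdint.h>

/* The two masks are the 52-bit low-limb mask and the 48-bit top-limb mask
   of the radix-2^52 representation (4 * 52 + 48 = 256 bits). */
_Static_assert(0xfffffffffffffULL == (1ULL << 52) - 1ULL, "52-bit limb mask");
_Static_assert(0xffffffffffffULL == (1ULL << 48) - 1ULL, "48-bit top-limb mask");

/* Old cast-based spelling and new suffixed spelling denote the same value. */
_Static_assert((uint64_t)0xfffffffffffffU == 0xfffffffffffffULL, "low mask respelled");
_Static_assert((uint64_t)0xffffffffffffU == 0xffffffffffffULL, "top mask respelled");
_Static_assert((uint64_t)9007199254740990U == 9007199254740990ULL, "negation constant respelled");
_Static_assert((uint64_t)562949953421310U == 562949953421310ULL, "top negation constant respelled");
_Static_assert((uint32_t)3U == 3U && (uint32_t)23U == 23U, "squaring counts respelled");

Since every operand these constants meet is already a uint64_t (or, for the loop and squaring counts, a uint32_t parameter), both spellings produce bit-for-bit identical results under the usual arithmetic conversions.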
diff --git a/include/msvc/internal/Hacl_Ed25519_PrecompTable.h b/include/msvc/internal/Hacl_Ed25519_PrecompTable.h
index 77d2244c..a20cd912 100644
--- a/include/msvc/internal/Hacl_Ed25519_PrecompTable.h
+++ b/include/msvc/internal/Hacl_Ed25519_PrecompTable.h
@@ -39,655 +39,491 @@ static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)1738742601995546U, (uint64_t)1146398526822698U,
-    (uint64_t)2070867633025821U, (uint64_t)562264141797630U, (uint64_t)587772402128613U,
-    (uint64_t)1801439850948184U, (uint64_t)1351079888211148U, (uint64_t)450359962737049U,
-    (uint64_t)900719925474099U, (uint64_t)1801439850948198U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1841354044333475U,
-    (uint64_t)16398895984059U, (uint64_t)755974180946558U, (uint64_t)900171276175154U,
-    (uint64_t)1821297809914039U, (uint64_t)1661154287933054U, (uint64_t)284530020860578U,
-    (uint64_t)1390261174866914U, (uint64_t)1524110943907984U, (uint64_t)1045603498418422U,
-    (uint64_t)928651508580478U, (uint64_t)1383326941296346U, (uint64_t)961937908925785U,
-    (uint64_t)80455759693706U, (uint64_t)904734540352947U, (uint64_t)1507481815385608U,
-    (uint64_t)2223447444246085U, (uint64_t)1083941587175919U, (uint64_t)2059929906842505U,
-    (uint64_t)1581435440146976U, (uint64_t)782730187692425U, (uint64_t)9928394897574U,
-    (uint64_t)1539449519985236U, (uint64_t)1923587931078510U, (uint64_t)552919286076056U,
-    (uint64_t)376925408065760U, (uint64_t)447320488831784U, (uint64_t)1362918338468019U,
-    (uint64_t)1470031896696846U, (uint64_t)2189796996539902U, (uint64_t)1337552949959847U,
-    (uint64_t)1762287177775726U, (uint64_t)237994495816815U, (uint64_t)1277840395970544U,
-    (uint64_t)543972849007241U, (uint64_t)1224692671618814U, (uint64_t)162359533289271U,
-    (uint64_t)282240927125249U, (uint64_t)586909166382289U, (uint64_t)17726488197838U,
-    (uint64_t)377014554985659U, (uint64_t)1433835303052512U, (uint64_t)702061469493692U,
-    (uint64_t)1142253108318154U, (uint64_t)318297794307551U, (uint64_t)954362646308543U,
-    (uint64_t)517363881452320U, (uint64_t)1868013482130416U, (uint64_t)262562472373260U,
-    (uint64_t)902232853249919U, (uint64_t)2107343057055746U, (uint64_t)462368348619024U,
-    (uint64_t)1893758677092974U, (uint64_t)2177729767846389U, (uint64_t)2168532543559143U,
-    (uint64_t)443867094639821U, (uint64_t)730169342581022U, (uint64_t)1564589016879755U,
-    (uint64_t)51218195700649U, (uint64_t)76684578423745U, (uint64_t)560266272480743U,
-    (uint64_t)922517457707697U, (uint64_t)2066645939860874U, (uint64_t)1318277348414638U,
-    (uint64_t)1576726809084003U, (uint64_t)1817337608563665U, (uint64_t)1874240939237666U,
-    (uint64_t)754733726333910U, (uint64_t)97085310406474U, (uint64_t)751148364309235U,
-    (uint64_t)1622159695715187U, (uint64_t)1444098819684916U, (uint64_t)130920805558089U,
-    (uint64_t)1260449179085308U, (uint64_t)1860021740768461U, (uint64_t)110052860348509U,
-    (uint64_t)193830891643810U, (uint64_t)164148413933881U, (uint64_t)180017794795332U,
-    (uint64_t)1523506525254651U, (uint64_t)465981629225956U, (uint64_t)559733514964572U,
-    (uint64_t)1279624874416974U, (uint64_t)2026642326892306U, (uint64_t)1425156829982409U,
-    (uint64_t)2160936383793147U, (uint64_t)1061870624975247U, (uint64_t)2023497043036941U,
-    (uint64_t)117942212883190U, (uint64_t)490339622800774U, (uint64_t)1729931303146295U,
-    (uint64_t)422305932971074U, (uint64_t)529103152793096U, (uint64_t)1211973233775992U,
-    (uint64_t)721364955929681U, (uint64_t)1497674430438813U, (uint64_t)342545521275073U,
-    (uint64_t)2102107575279372U, (uint64_t)2108462244669966U, (uint64_t)1382582406064082U,
-    (uint64_t)2206396818383323U, (uint64_t)2109093268641147U, (uint64_t)10809845110983U,
-    (uint64_t)1605176920880099U, (uint64_t)744640650753946U, (uint64_t)1712758897518129U,
-    (uint64_t)373410811281809U, (uint64_t)648838265800209U, (uint64_t)813058095530999U,
-    (uint64_t)513987632620169U, (uint64_t)465516160703329U, (uint64_t)2136322186126330U,
-    (uint64_t)1979645899422932U, (uint64_t)1197131006470786U, (uint64_t)1467836664863979U,
-    (uint64_t)1340751381374628U, (uint64_t)1810066212667962U, (uint64_t)1009933588225499U,
-    (uint64_t)1106129188080873U, (uint64_t)1388980405213901U, (uint64_t)533719246598044U,
-    (uint64_t)1169435803073277U, (uint64_t)198920999285821U, (uint64_t)487492330629854U,
-    (uint64_t)1807093008537778U, (uint64_t)1540899012923865U, (uint64_t)2075080271659867U,
-    (uint64_t)1527990806921523U, (uint64_t)1323728742908002U, (uint64_t)1568595959608205U,
-    (uint64_t)1388032187497212U, (uint64_t)2026968840050568U, (uint64_t)1396591153295755U,
-    (uint64_t)820416950170901U, (uint64_t)520060313205582U, (uint64_t)2016404325094901U,
-    (uint64_t)1584709677868520U, (uint64_t)272161374469956U, (uint64_t)1567188603996816U,
-    (uint64_t)1986160530078221U, (uint64_t)553930264324589U, (uint64_t)1058426729027503U,
-    (uint64_t)8762762886675U, (uint64_t)2216098143382988U, (uint64_t)1835145266889223U,
-    (uint64_t)1712936431558441U, (uint64_t)1017009937844974U, (uint64_t)585361667812740U,
-    (uint64_t)2114711541628181U, (uint64_t)2238729632971439U, (uint64_t)121257546253072U,
-    (uint64_t)847154149018345U, (uint64_t)211972965476684U, (uint64_t)287499084460129U,
-    (uint64_t)2098247259180197U, (uint64_t)839070411583329U, (uint64_t)339551619574372U,
-    (uint64_t)1432951287640743U, (uint64_t)526481249498942U, (uint64_t)931991661905195U,
-    (uint64_t)1884279965674487U, (uint64_t)200486405604411U, (uint64_t)364173020594788U,
-    (uint64_t)518034455936955U, (uint64_t)1085564703965501U, (uint64_t)16030410467927U,
-    (uint64_t)604865933167613U, (uint64_t)1695298441093964U, (uint64_t)498856548116159U,
-    (uint64_t)2193030062787034U, (uint64_t)1706339802964179U, (uint64_t)1721199073493888U,
-    (uint64_t)820740951039755U, (uint64_t)1216053436896834U, (uint64_t)23954895815139U,
-    (uint64_t)1662515208920491U, (uint64_t)1705443427511899U, (uint64_t)1957928899570365U,
-    (uint64_t)1189636258255725U, (uint64_t)1795695471103809U, (uint64_t)1691191297654118U,
-    (uint64_t)282402585374360U, (uint64_t)460405330264832U, (uint64_t)63765529445733U,
-    (uint64_t)469763447404473U, (uint64_t)733607089694996U, (uint64_t)685410420186959U,
-    (uint64_t)1096682630419738U, (uint64_t)1162548510542362U, (uint64_t)1020949526456676U,
-    (uint64_t)1211660396870573U, (uint64_t)613126398222696U, (uint64_t)1117829165843251U,
-    (uint64_t)742432540886650U, (uint64_t)1483755088010658U, (uint64_t)942392007134474U,
-    (uint64_t)1447834130944107U, (uint64_t)489368274863410U, (uint64_t)23192985544898U,
-    (uint64_t)648442406146160U, (uint64_t)785438843373876U, (uint64_t)249464684645238U,
-    (uint64_t)170494608205618U, (uint64_t)335112827260550U, (uint64_t)1462050123162735U,
-    (uint64_t)1084803668439016U, (uint64_t)853459233600325U, (uint64_t)215777728187495U,
-    (uint64_t)1965759433526974U, (uint64_t)1349482894446537U, (uint64_t)694163317612871U,
-    (uint64_t)860536766165036U, (uint64_t)1178788094084321U, (uint64_t)1652739626626996U,
-    (uint64_t)2115723946388185U, (uint64_t)1577204379094664U, (uint64_t)1083882859023240U,
-    (uint64_t)1768759143381635U, (uint64_t)1737180992507258U, (uint64_t)246054513922239U,
-    (uint64_t)577253134087234U, (uint64_t)356340280578042U, (uint64_t)1638917769925142U,
-    (uint64_t)223550348130103U, (uint64_t)470592666638765U, (uint64_t)22663573966996U,
-    (uint64_t)596552461152400U, (uint64_t)364143537069499U, (uint64_t)3942119457699U,
-    (uint64_t)107951982889287U, (uint64_t)1843471406713209U, (uint64_t)1625773041610986U,
-    (uint64_t)1466141092501702U, (uint64_t)1043024095021271U, (uint64_t)310429964047508U,
-    (uint64_t)98559121500372U, (uint64_t)152746933782868U, (uint64_t)259407205078261U,
-    (uint64_t)828123093322585U, (uint64_t)1576847274280091U, (uint64_t)1170871375757302U,
-    (uint64_t)1588856194642775U, (uint64_t)984767822341977U, (uint64_t)1141497997993760U,
-    (uint64_t)809325345150796U, (uint64_t)1879837728202511U, (uint64_t)201340910657893U,
-    (uint64_t)1079157558888483U, (uint64_t)1052373448588065U, (uint64_t)1732036202501778U,
-    (uint64_t)2105292670328445U, (uint64_t)679751387312402U, (uint64_t)1679682144926229U,
-    (uint64_t)1695823455818780U, (uint64_t)498852317075849U, (uint64_t)1786555067788433U,
-    (uint64_t)1670727545779425U, (uint64_t)117945875433544U, (uint64_t)407939139781844U,
-    (uint64_t)854632120023778U, (uint64_t)1413383148360437U, (uint64_t)286030901733673U,
-    (uint64_t)1207361858071196U, (uint64_t)461340408181417U, (uint64_t)1096919590360164U,
-    (uint64_t)1837594897475685U, (uint64_t)533755561544165U, (uint64_t)1638688042247712U,
-    (uint64_t)1431653684793005U, (uint64_t)1036458538873559U, (uint64_t)390822120341779U,
-    (uint64_t)1920929837111618U, (uint64_t)543426740024168U, (uint64_t)645751357799929U,
-    (uint64_t)2245025632994463U, (uint64_t)1550778638076452U, (uint64_t)223738153459949U,
-    (uint64_t)1337209385492033U, (uint64_t)1276967236456531U, (uint64_t)1463815821063071U,
-    (uint64_t)2070620870191473U, (uint64_t)1199170709413753U, (uint64_t)273230877394166U,
-    (uint64_t)1873264887608046U, (uint64_t)890877152910775U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1738742601995546ULL, 1146398526822698ULL, 2070867633025821ULL,
+    562264141797630ULL, 587772402128613ULL, 1801439850948184ULL, 1351079888211148ULL,
+    450359962737049ULL, 900719925474099ULL, 1801439850948198ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    1841354044333475ULL, 16398895984059ULL, 755974180946558ULL, 900171276175154ULL,
+    1821297809914039ULL, 1661154287933054ULL, 284530020860578ULL, 1390261174866914ULL,
+    1524110943907984ULL, 1045603498418422ULL, 928651508580478ULL, 1383326941296346ULL,
+    961937908925785ULL, 80455759693706ULL, 904734540352947ULL, 1507481815385608ULL,
+    2223447444246085ULL, 1083941587175919ULL, 2059929906842505ULL, 1581435440146976ULL,
+    782730187692425ULL, 9928394897574ULL, 1539449519985236ULL, 1923587931078510ULL,
+    552919286076056ULL, 376925408065760ULL, 447320488831784ULL, 1362918338468019ULL,
+    1470031896696846ULL, 2189796996539902ULL, 1337552949959847ULL, 1762287177775726ULL,
+    237994495816815ULL, 1277840395970544ULL, 543972849007241ULL, 1224692671618814ULL,
+    162359533289271ULL, 282240927125249ULL, 586909166382289ULL, 17726488197838ULL,
+    377014554985659ULL, 1433835303052512ULL, 702061469493692ULL, 1142253108318154ULL,
+    318297794307551ULL, 954362646308543ULL, 517363881452320ULL, 1868013482130416ULL,
+    262562472373260ULL, 902232853249919ULL, 2107343057055746ULL, 462368348619024ULL,
+    1893758677092974ULL, 2177729767846389ULL, 2168532543559143ULL, 443867094639821ULL,
+    730169342581022ULL, 1564589016879755ULL, 51218195700649ULL, 76684578423745ULL,
+    560266272480743ULL, 922517457707697ULL, 2066645939860874ULL, 1318277348414638ULL,
+    1576726809084003ULL, 1817337608563665ULL, 1874240939237666ULL, 754733726333910ULL,
+    97085310406474ULL, 751148364309235ULL, 1622159695715187ULL, 1444098819684916ULL,
+    130920805558089ULL, 1260449179085308ULL, 1860021740768461ULL, 110052860348509ULL,
+    193830891643810ULL, 164148413933881ULL, 180017794795332ULL, 1523506525254651ULL,
+    465981629225956ULL, 559733514964572ULL, 1279624874416974ULL, 2026642326892306ULL,
+    1425156829982409ULL, 2160936383793147ULL, 1061870624975247ULL, 2023497043036941ULL,
+    117942212883190ULL, 490339622800774ULL, 1729931303146295ULL, 422305932971074ULL,
+    529103152793096ULL, 1211973233775992ULL, 721364955929681ULL, 1497674430438813ULL,
+    342545521275073ULL, 2102107575279372ULL, 2108462244669966ULL, 1382582406064082ULL,
+    2206396818383323ULL, 2109093268641147ULL, 10809845110983ULL, 1605176920880099ULL,
+    744640650753946ULL, 1712758897518129ULL, 373410811281809ULL, 648838265800209ULL,
+    813058095530999ULL, 513987632620169ULL, 465516160703329ULL, 2136322186126330ULL,
+    1979645899422932ULL, 1197131006470786ULL, 1467836664863979ULL, 1340751381374628ULL,
+    1810066212667962ULL, 1009933588225499ULL, 1106129188080873ULL, 1388980405213901ULL,
+    533719246598044ULL, 1169435803073277ULL, 198920999285821ULL, 487492330629854ULL,
+    1807093008537778ULL, 1540899012923865ULL, 2075080271659867ULL, 1527990806921523ULL,
+    1323728742908002ULL, 1568595959608205ULL, 1388032187497212ULL, 2026968840050568ULL,
+    1396591153295755ULL, 820416950170901ULL, 520060313205582ULL, 2016404325094901ULL,
+    1584709677868520ULL, 272161374469956ULL, 1567188603996816ULL, 1986160530078221ULL,
+    553930264324589ULL, 1058426729027503ULL, 8762762886675ULL, 2216098143382988ULL,
+    1835145266889223ULL, 1712936431558441ULL, 1017009937844974ULL, 585361667812740ULL,
+    2114711541628181ULL, 2238729632971439ULL, 121257546253072ULL, 847154149018345ULL,
+    211972965476684ULL, 287499084460129ULL, 2098247259180197ULL, 839070411583329ULL,
+    339551619574372ULL, 1432951287640743ULL, 526481249498942ULL, 931991661905195ULL,
+    1884279965674487ULL, 200486405604411ULL, 364173020594788ULL, 518034455936955ULL,
+    1085564703965501ULL, 16030410467927ULL, 604865933167613ULL, 1695298441093964ULL,
+    498856548116159ULL, 2193030062787034ULL, 1706339802964179ULL, 1721199073493888ULL,
+    820740951039755ULL, 1216053436896834ULL, 23954895815139ULL, 1662515208920491ULL,
+    1705443427511899ULL, 1957928899570365ULL, 1189636258255725ULL, 1795695471103809ULL,
+    1691191297654118ULL, 282402585374360ULL, 460405330264832ULL, 63765529445733ULL,
+    469763447404473ULL, 733607089694996ULL, 685410420186959ULL, 1096682630419738ULL,
+    1162548510542362ULL, 1020949526456676ULL, 1211660396870573ULL, 613126398222696ULL,
+    1117829165843251ULL, 742432540886650ULL, 1483755088010658ULL, 942392007134474ULL,
+    1447834130944107ULL, 489368274863410ULL, 23192985544898ULL, 648442406146160ULL,
+    785438843373876ULL, 249464684645238ULL, 170494608205618ULL, 335112827260550ULL,
+    1462050123162735ULL, 1084803668439016ULL, 853459233600325ULL, 215777728187495ULL,
+    1965759433526974ULL, 1349482894446537ULL, 694163317612871ULL, 860536766165036ULL,
+    1178788094084321ULL, 1652739626626996ULL, 2115723946388185ULL, 1577204379094664ULL,
+    1083882859023240ULL, 1768759143381635ULL, 1737180992507258ULL, 246054513922239ULL,
+    577253134087234ULL, 356340280578042ULL, 1638917769925142ULL, 223550348130103ULL,
+    470592666638765ULL, 22663573966996ULL, 596552461152400ULL, 364143537069499ULL, 3942119457699ULL,
+    107951982889287ULL, 1843471406713209ULL, 1625773041610986ULL, 1466141092501702ULL,
+    1043024095021271ULL, 310429964047508ULL, 98559121500372ULL, 152746933782868ULL,
+    259407205078261ULL, 828123093322585ULL, 1576847274280091ULL, 1170871375757302ULL,
+    1588856194642775ULL, 984767822341977ULL, 1141497997993760ULL, 809325345150796ULL,
+    1879837728202511ULL, 201340910657893ULL, 1079157558888483ULL, 1052373448588065ULL,
+    1732036202501778ULL, 2105292670328445ULL, 679751387312402ULL, 1679682144926229ULL,
+    1695823455818780ULL, 498852317075849ULL, 1786555067788433ULL, 1670727545779425ULL,
+    117945875433544ULL, 407939139781844ULL, 854632120023778ULL, 1413383148360437ULL,
+    286030901733673ULL, 1207361858071196ULL, 461340408181417ULL, 1096919590360164ULL,
+    1837594897475685ULL, 533755561544165ULL, 1638688042247712ULL, 1431653684793005ULL,
+    1036458538873559ULL, 390822120341779ULL, 1920929837111618ULL, 543426740024168ULL,
+    645751357799929ULL, 2245025632994463ULL, 1550778638076452ULL, 223738153459949ULL,
+    1337209385492033ULL, 1276967236456531ULL, 1463815821063071ULL, 2070620870191473ULL,
+    1199170709413753ULL, 273230877394166ULL, 1873264887608046ULL, 890877152910775ULL
   };
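Each of these w4 tables is a 320-entry uint64_t array, which is consistent with 16 precomputed points of 4 extended coordinates (X, Y, Z, T), each coordinate held as 5 limbs of the radix-2^51 field representation used for Ed25519 (16 * 4 * 5 = 320); the first 20 entries above, all zeros except for a 1 at the start of the second and third coordinate, read as the neutral element (0 : 1 : 1 : 0). Purely for illustration, a hypothetical accessor under that layout assumption (the name precomp_w4_point is not part of the patch):

/*
 * Hypothetical accessor, assuming the layout described above: return a
 * pointer to the 20 limbs of precomputed point i (0 <= i < 16) in a w4 table.
 */
#include <stdint.h>

static inline const uint64_t *precomp_w4_point(const uint64_t table[320U], uint32_t i)
{
  return table + 20U * i; /* 20 = 4 coordinates * 5 limbs per point */
}

For example, precomp_w4_point(Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w4, 1U) would point at the 20 limbs beginning with 1738742601995546ULL shown above.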
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_g_pow2_64_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)13559344787725U, (uint64_t)2051621493703448U,
-    (uint64_t)1947659315640708U, (uint64_t)626856790370168U, (uint64_t)1592804284034836U,
-    (uint64_t)1781728767459187U, (uint64_t)278818420518009U, (uint64_t)2038030359908351U,
-    (uint64_t)910625973862690U, (uint64_t)471887343142239U, (uint64_t)1298543306606048U,
-    (uint64_t)794147365642417U, (uint64_t)129968992326749U, (uint64_t)523140861678572U,
-    (uint64_t)1166419653909231U, (uint64_t)2009637196928390U, (uint64_t)1288020222395193U,
-    (uint64_t)1007046974985829U, (uint64_t)208981102651386U, (uint64_t)2074009315253380U,
-    (uint64_t)1564056062071967U, (uint64_t)276822668750618U, (uint64_t)206621292512572U,
-    (uint64_t)470304361809269U, (uint64_t)895215438398493U, (uint64_t)1527859053868686U,
-    (uint64_t)1624967223409369U, (uint64_t)811821865979736U, (uint64_t)350450534838340U,
-    (uint64_t)219143807921807U, (uint64_t)507994540371254U, (uint64_t)986513794574720U,
-    (uint64_t)1142661369967121U, (uint64_t)621278293399257U, (uint64_t)556189161519781U,
-    (uint64_t)351964007865066U, (uint64_t)2011573453777822U, (uint64_t)1367125527151537U,
-    (uint64_t)1691316722438196U, (uint64_t)731328817345164U, (uint64_t)1284781192709232U,
-    (uint64_t)478439299539269U, (uint64_t)204842178076429U, (uint64_t)2085125369913651U,
-    (uint64_t)1980773492792985U, (uint64_t)1480264409524940U, (uint64_t)688389585376233U,
-    (uint64_t)612962643526972U, (uint64_t)165595382536676U, (uint64_t)1850300069212263U,
-    (uint64_t)1176357203491551U, (uint64_t)1880164984292321U, (uint64_t)10786153104736U,
-    (uint64_t)1242293560510203U, (uint64_t)1358399951884084U, (uint64_t)1901358796610357U,
-    (uint64_t)1385092558795806U, (uint64_t)1734893785311348U, (uint64_t)2046201851951191U,
-    (uint64_t)1233811309557352U, (uint64_t)1531160168656129U, (uint64_t)1543287181303358U,
-    (uint64_t)516121446374119U, (uint64_t)723422668089935U, (uint64_t)1228176774959679U,
-    (uint64_t)1598014722726267U, (uint64_t)1630810326658412U, (uint64_t)1343833067463760U,
-    (uint64_t)1024397964362099U, (uint64_t)1157142161346781U, (uint64_t)56422174971792U,
-    (uint64_t)544901687297092U, (uint64_t)1291559028869009U, (uint64_t)1336918672345120U,
-    (uint64_t)1390874603281353U, (uint64_t)1127199512010904U, (uint64_t)992644979940964U,
-    (uint64_t)1035213479783573U, (uint64_t)36043651196100U, (uint64_t)1220961519321221U,
-    (uint64_t)1348190007756977U, (uint64_t)579420200329088U, (uint64_t)1703819961008985U,
-    (uint64_t)1993919213460047U, (uint64_t)2225080008232251U, (uint64_t)392785893702372U,
-    (uint64_t)464312521482632U, (uint64_t)1224525362116057U, (uint64_t)810394248933036U,
-    (uint64_t)932513521649107U, (uint64_t)592314953488703U, (uint64_t)586334603791548U,
-    (uint64_t)1310888126096549U, (uint64_t)650842674074281U, (uint64_t)1596447001791059U,
-    (uint64_t)2086767406328284U, (uint64_t)1866377645879940U, (uint64_t)1721604362642743U,
-    (uint64_t)738502322566890U, (uint64_t)1851901097729689U, (uint64_t)1158347571686914U,
-    (uint64_t)2023626733470827U, (uint64_t)329625404653699U, (uint64_t)563555875598551U,
-    (uint64_t)516554588079177U, (uint64_t)1134688306104598U, (uint64_t)186301198420809U,
-    (uint64_t)1339952213563300U, (uint64_t)643605614625891U, (uint64_t)1947505332718043U,
-    (uint64_t)1722071694852824U, (uint64_t)601679570440694U, (uint64_t)1821275721236351U,
-    (uint64_t)1808307842870389U, (uint64_t)1654165204015635U, (uint64_t)1457334100715245U,
-    (uint64_t)217784948678349U, (uint64_t)1820622417674817U, (uint64_t)1946121178444661U,
-    (uint64_t)597980757799332U, (uint64_t)1745271227710764U, (uint64_t)2010952890941980U,
-    (uint64_t)339811849696648U, (uint64_t)1066120666993872U, (uint64_t)261276166508990U,
-    (uint64_t)323098645774553U, (uint64_t)207454744271283U, (uint64_t)941448672977675U,
-    (uint64_t)71890920544375U, (uint64_t)840849789313357U, (uint64_t)1223996070717926U,
-    (uint64_t)196832550853408U, (uint64_t)115986818309231U, (uint64_t)1586171527267675U,
-    (uint64_t)1666169080973450U, (uint64_t)1456454731176365U, (uint64_t)44467854369003U,
-    (uint64_t)2149656190691480U, (uint64_t)283446383597589U, (uint64_t)2040542647729974U,
-    (uint64_t)305705593840224U, (uint64_t)475315822269791U, (uint64_t)648133452550632U,
-    (uint64_t)169218658835720U, (uint64_t)24960052338251U, (uint64_t)938907951346766U,
-    (uint64_t)425970950490510U, (uint64_t)1037622011013183U, (uint64_t)1026882082708180U,
-    (uint64_t)1635699409504916U, (uint64_t)1644776942870488U, (uint64_t)2151820331175914U,
-    (uint64_t)824120674069819U, (uint64_t)835744976610113U, (uint64_t)1991271032313190U,
-    (uint64_t)96507354724855U, (uint64_t)400645405133260U, (uint64_t)343728076650825U,
-    (uint64_t)1151585441385566U, (uint64_t)1403339955333520U, (uint64_t)230186314139774U,
-    (uint64_t)1736248861506714U, (uint64_t)1010804378904572U, (uint64_t)1394932289845636U,
-    (uint64_t)1901351256960852U, (uint64_t)2187471430089807U, (uint64_t)1003853262342670U,
-    (uint64_t)1327743396767461U, (uint64_t)1465160415991740U, (uint64_t)366625359144534U,
-    (uint64_t)1534791405247604U, (uint64_t)1790905930250187U, (uint64_t)1255484115292738U,
-    (uint64_t)2223291365520443U, (uint64_t)210967717407408U, (uint64_t)26722916813442U,
-    (uint64_t)1919574361907910U, (uint64_t)468825088280256U, (uint64_t)2230011775946070U,
-    (uint64_t)1628365642214479U, (uint64_t)568871869234932U, (uint64_t)1066987968780488U,
-    (uint64_t)1692242903745558U, (uint64_t)1678903997328589U, (uint64_t)214262165888021U,
-    (uint64_t)1929686748607204U, (uint64_t)1790138967989670U, (uint64_t)1790261616022076U,
-    (uint64_t)1559824537553112U, (uint64_t)1230364591311358U, (uint64_t)147531939886346U,
-    (uint64_t)1528207085815487U, (uint64_t)477957922927292U, (uint64_t)285670243881618U,
-    (uint64_t)264430080123332U, (uint64_t)1163108160028611U, (uint64_t)373201522147371U,
-    (uint64_t)34903775270979U, (uint64_t)1750870048600662U, (uint64_t)1319328308741084U,
-    (uint64_t)1547548634278984U, (uint64_t)1691259592202927U, (uint64_t)2247758037259814U,
-    (uint64_t)329611399953677U, (uint64_t)1385555496268877U, (uint64_t)2242438354031066U,
-    (uint64_t)1329523854843632U, (uint64_t)399895373846055U, (uint64_t)678005703193452U,
-    (uint64_t)1496357700997771U, (uint64_t)71909969781942U, (uint64_t)1515391418612349U,
-    (uint64_t)470110837888178U, (uint64_t)1981307309417466U, (uint64_t)1259888737412276U,
-    (uint64_t)669991710228712U, (uint64_t)1048546834514303U, (uint64_t)1678323291295512U,
-    (uint64_t)2172033978088071U, (uint64_t)1529278455500556U, (uint64_t)901984601941894U,
-    (uint64_t)780867622403807U, (uint64_t)550105677282793U, (uint64_t)975860231176136U,
-    (uint64_t)525188281689178U, (uint64_t)49966114807992U, (uint64_t)1776449263836645U,
-    (uint64_t)267851776380338U, (uint64_t)2225969494054620U, (uint64_t)2016794225789822U,
-    (uint64_t)1186108678266608U, (uint64_t)1023083271408882U, (uint64_t)1119289418565906U,
-    (uint64_t)1248185897348801U, (uint64_t)1846081539082697U, (uint64_t)23756429626075U,
-    (uint64_t)1441999021105403U, (uint64_t)724497586552825U, (uint64_t)1287761623605379U,
-    (uint64_t)685303359654224U, (uint64_t)2217156930690570U, (uint64_t)163769288918347U,
-    (uint64_t)1098423278284094U, (uint64_t)1391470723006008U, (uint64_t)570700152353516U,
-    (uint64_t)744804507262556U, (uint64_t)2200464788609495U, (uint64_t)624141899161992U,
-    (uint64_t)2249570166275684U, (uint64_t)378706441983561U, (uint64_t)122486379999375U,
-    (uint64_t)430741162798924U, (uint64_t)113847463452574U, (uint64_t)266250457840685U,
-    (uint64_t)2120743625072743U, (uint64_t)222186221043927U, (uint64_t)1964290018305582U,
-    (uint64_t)1435278008132477U, (uint64_t)1670867456663734U, (uint64_t)2009989552599079U,
-    (uint64_t)1348024113448744U, (uint64_t)1158423886300455U, (uint64_t)1356467152691569U,
-    (uint64_t)306943042363674U, (uint64_t)926879628664255U, (uint64_t)1349295689598324U,
-    (uint64_t)725558330071205U, (uint64_t)536569987519948U, (uint64_t)116436990335366U,
-    (uint64_t)1551888573800376U, (uint64_t)2044698345945451U, (uint64_t)104279940291311U,
-    (uint64_t)251526570943220U, (uint64_t)754735828122925U, (uint64_t)33448073576361U,
-    (uint64_t)994605876754543U, (uint64_t)546007584022006U, (uint64_t)2217332798409487U,
-    (uint64_t)706477052561591U, (uint64_t)131174619428653U, (uint64_t)2148698284087243U,
-    (uint64_t)239290486205186U, (uint64_t)2161325796952184U, (uint64_t)1713452845607994U,
-    (uint64_t)1297861562938913U, (uint64_t)1779539876828514U, (uint64_t)1926559018603871U,
-    (uint64_t)296485747893968U, (uint64_t)1859208206640686U, (uint64_t)538513979002718U,
-    (uint64_t)103998826506137U, (uint64_t)2025375396538469U, (uint64_t)1370680785701206U,
-    (uint64_t)1698557311253840U, (uint64_t)1411096399076595U, (uint64_t)2132580530813677U,
-    (uint64_t)2071564345845035U, (uint64_t)498581428556735U, (uint64_t)1136010486691371U,
-    (uint64_t)1927619356993146U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 13559344787725ULL, 2051621493703448ULL, 1947659315640708ULL,
+    626856790370168ULL, 1592804284034836ULL, 1781728767459187ULL, 278818420518009ULL,
+    2038030359908351ULL, 910625973862690ULL, 471887343142239ULL, 1298543306606048ULL,
+    794147365642417ULL, 129968992326749ULL, 523140861678572ULL, 1166419653909231ULL,
+    2009637196928390ULL, 1288020222395193ULL, 1007046974985829ULL, 208981102651386ULL,
+    2074009315253380ULL, 1564056062071967ULL, 276822668750618ULL, 206621292512572ULL,
+    470304361809269ULL, 895215438398493ULL, 1527859053868686ULL, 1624967223409369ULL,
+    811821865979736ULL, 350450534838340ULL, 219143807921807ULL, 507994540371254ULL,
+    986513794574720ULL, 1142661369967121ULL, 621278293399257ULL, 556189161519781ULL,
+    351964007865066ULL, 2011573453777822ULL, 1367125527151537ULL, 1691316722438196ULL,
+    731328817345164ULL, 1284781192709232ULL, 478439299539269ULL, 204842178076429ULL,
+    2085125369913651ULL, 1980773492792985ULL, 1480264409524940ULL, 688389585376233ULL,
+    612962643526972ULL, 165595382536676ULL, 1850300069212263ULL, 1176357203491551ULL,
+    1880164984292321ULL, 10786153104736ULL, 1242293560510203ULL, 1358399951884084ULL,
+    1901358796610357ULL, 1385092558795806ULL, 1734893785311348ULL, 2046201851951191ULL,
+    1233811309557352ULL, 1531160168656129ULL, 1543287181303358ULL, 516121446374119ULL,
+    723422668089935ULL, 1228176774959679ULL, 1598014722726267ULL, 1630810326658412ULL,
+    1343833067463760ULL, 1024397964362099ULL, 1157142161346781ULL, 56422174971792ULL,
+    544901687297092ULL, 1291559028869009ULL, 1336918672345120ULL, 1390874603281353ULL,
+    1127199512010904ULL, 992644979940964ULL, 1035213479783573ULL, 36043651196100ULL,
+    1220961519321221ULL, 1348190007756977ULL, 579420200329088ULL, 1703819961008985ULL,
+    1993919213460047ULL, 2225080008232251ULL, 392785893702372ULL, 464312521482632ULL,
+    1224525362116057ULL, 810394248933036ULL, 932513521649107ULL, 592314953488703ULL,
+    586334603791548ULL, 1310888126096549ULL, 650842674074281ULL, 1596447001791059ULL,
+    2086767406328284ULL, 1866377645879940ULL, 1721604362642743ULL, 738502322566890ULL,
+    1851901097729689ULL, 1158347571686914ULL, 2023626733470827ULL, 329625404653699ULL,
+    563555875598551ULL, 516554588079177ULL, 1134688306104598ULL, 186301198420809ULL,
+    1339952213563300ULL, 643605614625891ULL, 1947505332718043ULL, 1722071694852824ULL,
+    601679570440694ULL, 1821275721236351ULL, 1808307842870389ULL, 1654165204015635ULL,
+    1457334100715245ULL, 217784948678349ULL, 1820622417674817ULL, 1946121178444661ULL,
+    597980757799332ULL, 1745271227710764ULL, 2010952890941980ULL, 339811849696648ULL,
+    1066120666993872ULL, 261276166508990ULL, 323098645774553ULL, 207454744271283ULL,
+    941448672977675ULL, 71890920544375ULL, 840849789313357ULL, 1223996070717926ULL,
+    196832550853408ULL, 115986818309231ULL, 1586171527267675ULL, 1666169080973450ULL,
+    1456454731176365ULL, 44467854369003ULL, 2149656190691480ULL, 283446383597589ULL,
+    2040542647729974ULL, 305705593840224ULL, 475315822269791ULL, 648133452550632ULL,
+    169218658835720ULL, 24960052338251ULL, 938907951346766ULL, 425970950490510ULL,
+    1037622011013183ULL, 1026882082708180ULL, 1635699409504916ULL, 1644776942870488ULL,
+    2151820331175914ULL, 824120674069819ULL, 835744976610113ULL, 1991271032313190ULL,
+    96507354724855ULL, 400645405133260ULL, 343728076650825ULL, 1151585441385566ULL,
+    1403339955333520ULL, 230186314139774ULL, 1736248861506714ULL, 1010804378904572ULL,
+    1394932289845636ULL, 1901351256960852ULL, 2187471430089807ULL, 1003853262342670ULL,
+    1327743396767461ULL, 1465160415991740ULL, 366625359144534ULL, 1534791405247604ULL,
+    1790905930250187ULL, 1255484115292738ULL, 2223291365520443ULL, 210967717407408ULL,
+    26722916813442ULL, 1919574361907910ULL, 468825088280256ULL, 2230011775946070ULL,
+    1628365642214479ULL, 568871869234932ULL, 1066987968780488ULL, 1692242903745558ULL,
+    1678903997328589ULL, 214262165888021ULL, 1929686748607204ULL, 1790138967989670ULL,
+    1790261616022076ULL, 1559824537553112ULL, 1230364591311358ULL, 147531939886346ULL,
+    1528207085815487ULL, 477957922927292ULL, 285670243881618ULL, 264430080123332ULL,
+    1163108160028611ULL, 373201522147371ULL, 34903775270979ULL, 1750870048600662ULL,
+    1319328308741084ULL, 1547548634278984ULL, 1691259592202927ULL, 2247758037259814ULL,
+    329611399953677ULL, 1385555496268877ULL, 2242438354031066ULL, 1329523854843632ULL,
+    399895373846055ULL, 678005703193452ULL, 1496357700997771ULL, 71909969781942ULL,
+    1515391418612349ULL, 470110837888178ULL, 1981307309417466ULL, 1259888737412276ULL,
+    669991710228712ULL, 1048546834514303ULL, 1678323291295512ULL, 2172033978088071ULL,
+    1529278455500556ULL, 901984601941894ULL, 780867622403807ULL, 550105677282793ULL,
+    975860231176136ULL, 525188281689178ULL, 49966114807992ULL, 1776449263836645ULL,
+    267851776380338ULL, 2225969494054620ULL, 2016794225789822ULL, 1186108678266608ULL,
+    1023083271408882ULL, 1119289418565906ULL, 1248185897348801ULL, 1846081539082697ULL,
+    23756429626075ULL, 1441999021105403ULL, 724497586552825ULL, 1287761623605379ULL,
+    685303359654224ULL, 2217156930690570ULL, 163769288918347ULL, 1098423278284094ULL,
+    1391470723006008ULL, 570700152353516ULL, 744804507262556ULL, 2200464788609495ULL,
+    624141899161992ULL, 2249570166275684ULL, 378706441983561ULL, 122486379999375ULL,
+    430741162798924ULL, 113847463452574ULL, 266250457840685ULL, 2120743625072743ULL,
+    222186221043927ULL, 1964290018305582ULL, 1435278008132477ULL, 1670867456663734ULL,
+    2009989552599079ULL, 1348024113448744ULL, 1158423886300455ULL, 1356467152691569ULL,
+    306943042363674ULL, 926879628664255ULL, 1349295689598324ULL, 725558330071205ULL,
+    536569987519948ULL, 116436990335366ULL, 1551888573800376ULL, 2044698345945451ULL,
+    104279940291311ULL, 251526570943220ULL, 754735828122925ULL, 33448073576361ULL,
+    994605876754543ULL, 546007584022006ULL, 2217332798409487ULL, 706477052561591ULL,
+    131174619428653ULL, 2148698284087243ULL, 239290486205186ULL, 2161325796952184ULL,
+    1713452845607994ULL, 1297861562938913ULL, 1779539876828514ULL, 1926559018603871ULL,
+    296485747893968ULL, 1859208206640686ULL, 538513979002718ULL, 103998826506137ULL,
+    2025375396538469ULL, 1370680785701206ULL, 1698557311253840ULL, 1411096399076595ULL,
+    2132580530813677ULL, 2071564345845035ULL, 498581428556735ULL, 1136010486691371ULL,
+    1927619356993146ULL
   };
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_g_pow2_128_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)557549315715710U, (uint64_t)196756086293855U,
-    (uint64_t)846062225082495U, (uint64_t)1865068224838092U, (uint64_t)991112090754908U,
-    (uint64_t)522916421512828U, (uint64_t)2098523346722375U, (uint64_t)1135633221747012U,
-    (uint64_t)858420432114866U, (uint64_t)186358544306082U, (uint64_t)1044420411868480U,
-    (uint64_t)2080052304349321U, (uint64_t)557301814716724U, (uint64_t)1305130257814057U,
-    (uint64_t)2126012765451197U, (uint64_t)1441004402875101U, (uint64_t)353948968859203U,
-    (uint64_t)470765987164835U, (uint64_t)1507675957683570U, (uint64_t)1086650358745097U,
-    (uint64_t)1911913434398388U, (uint64_t)66086091117182U, (uint64_t)1137511952425971U,
-    (uint64_t)36958263512141U, (uint64_t)2193310025325256U, (uint64_t)1085191426269045U,
-    (uint64_t)1232148267909446U, (uint64_t)1449894406170117U, (uint64_t)1241416717139557U,
-    (uint64_t)1940876999212868U, (uint64_t)829758415918121U, (uint64_t)309608450373449U,
-    (uint64_t)2228398547683851U, (uint64_t)1580623271960188U, (uint64_t)1675601502456740U,
-    (uint64_t)1360363115493548U, (uint64_t)1098397313096815U, (uint64_t)1809255384359797U,
-    (uint64_t)1458261916834384U, (uint64_t)210682545649705U, (uint64_t)1606836641068115U,
-    (uint64_t)1230478270405318U, (uint64_t)1843192771547802U, (uint64_t)1794596343564051U,
-    (uint64_t)229060710252162U, (uint64_t)2169742775467181U, (uint64_t)701467067318072U,
-    (uint64_t)696018499035555U, (uint64_t)521051885339807U, (uint64_t)158329567901874U,
-    (uint64_t)740426481832143U, (uint64_t)1369811177301441U, (uint64_t)503351589084015U,
-    (uint64_t)1781114827942261U, (uint64_t)1650493549693035U, (uint64_t)2174562418345156U,
-    (uint64_t)456517194809244U, (uint64_t)2052761522121179U, (uint64_t)2233342271123682U,
-    (uint64_t)1445872925177435U, (uint64_t)1131882576902813U, (uint64_t)220765848055241U,
-    (uint64_t)1280259961403769U, (uint64_t)1581497080160712U, (uint64_t)1477441080108824U,
-    (uint64_t)218428165202767U, (uint64_t)1970598141278907U, (uint64_t)643366736173069U,
-    (uint64_t)2167909426804014U, (uint64_t)834993711408259U, (uint64_t)1922437166463212U,
-    (uint64_t)1900036281472252U, (uint64_t)513794844386304U, (uint64_t)1297904164900114U,
-    (uint64_t)1147626295373268U, (uint64_t)1910101606251299U, (uint64_t)182933838633381U,
-    (uint64_t)806229530787362U, (uint64_t)155511666433200U, (uint64_t)290522463375462U,
-    (uint64_t)534373523491751U, (uint64_t)1302938814480515U, (uint64_t)1664979184120445U,
-    (uint64_t)304235649499423U, (uint64_t)339284524318609U, (uint64_t)1881717946973483U,
-    (uint64_t)1670802286833842U, (uint64_t)2223637120675737U, (uint64_t)135818919485814U,
-    (uint64_t)1144856572842792U, (uint64_t)2234981613434386U, (uint64_t)963917024969826U,
-    (uint64_t)402275378284993U, (uint64_t)141532417412170U, (uint64_t)921537468739387U,
-    (uint64_t)963905069722607U, (uint64_t)1405442890733358U, (uint64_t)1567763927164655U,
-    (uint64_t)1664776329195930U, (uint64_t)2095924165508507U, (uint64_t)994243110271379U,
-    (uint64_t)1243925610609353U, (uint64_t)1029845815569727U, (uint64_t)1001968867985629U,
-    (uint64_t)170368934002484U, (uint64_t)1100906131583801U, (uint64_t)1825190326449569U,
-    (uint64_t)1462285121182096U, (uint64_t)1545240767016377U, (uint64_t)797859025652273U,
-    (uint64_t)1062758326657530U, (uint64_t)1125600735118266U, (uint64_t)739325756774527U,
-    (uint64_t)1420144485966996U, (uint64_t)1915492743426702U, (uint64_t)752968196344993U,
-    (uint64_t)882156396938351U, (uint64_t)1909097048763227U, (uint64_t)849058590685611U,
-    (uint64_t)840754951388500U, (uint64_t)1832926948808323U, (uint64_t)2023317100075297U,
-    (uint64_t)322382745442827U, (uint64_t)1569741341737601U, (uint64_t)1678986113194987U,
-    (uint64_t)757598994581938U, (uint64_t)29678659580705U, (uint64_t)1239680935977986U,
-    (uint64_t)1509239427168474U, (uint64_t)1055981929287006U, (uint64_t)1894085471158693U,
-    (uint64_t)916486225488490U, (uint64_t)642168890366120U, (uint64_t)300453362620010U,
-    (uint64_t)1858797242721481U, (uint64_t)2077989823177130U, (uint64_t)510228455273334U,
-    (uint64_t)1473284798689270U, (uint64_t)5173934574301U, (uint64_t)765285232030050U,
-    (uint64_t)1007154707631065U, (uint64_t)1862128712885972U, (uint64_t)168873464821340U,
-    (uint64_t)1967853269759318U, (uint64_t)1489896018263031U, (uint64_t)592451806166369U,
-    (uint64_t)1242298565603883U, (uint64_t)1838918921339058U, (uint64_t)697532763910695U,
-    (uint64_t)294335466239059U, (uint64_t)135687058387449U, (uint64_t)2133734403874176U,
-    (uint64_t)2121911143127699U, (uint64_t)20222476737364U, (uint64_t)1200824626476747U,
-    (uint64_t)1397731736540791U, (uint64_t)702378430231418U, (uint64_t)59059527640068U,
-    (uint64_t)460992547183981U, (uint64_t)1016125857842765U, (uint64_t)1273530839608957U,
-    (uint64_t)96724128829301U, (uint64_t)1313433042425233U, (uint64_t)3543822857227U,
-    (uint64_t)761975685357118U, (uint64_t)110417360745248U, (uint64_t)1079634164577663U,
-    (uint64_t)2044574510020457U, (uint64_t)338709058603120U, (uint64_t)94541336042799U,
-    (uint64_t)127963233585039U, (uint64_t)94427896272258U, (uint64_t)1143501979342182U,
-    (uint64_t)1217958006212230U, (uint64_t)2153887831492134U, (uint64_t)1519219513255575U,
-    (uint64_t)251793195454181U, (uint64_t)392517349345200U, (uint64_t)1507033011868881U,
-    (uint64_t)2208494254670752U, (uint64_t)1364389582694359U, (uint64_t)2214069430728063U,
-    (uint64_t)1272814257105752U, (uint64_t)741450148906352U, (uint64_t)1105776675555685U,
-    (uint64_t)824447222014984U, (uint64_t)528745219306376U, (uint64_t)589427609121575U,
-    (uint64_t)1501786838809155U, (uint64_t)379067373073147U, (uint64_t)184909476589356U,
-    (uint64_t)1346887560616185U, (uint64_t)1932023742314082U, (uint64_t)1633302311869264U,
-    (uint64_t)1685314821133069U, (uint64_t)1836610282047884U, (uint64_t)1595571594397150U,
-    (uint64_t)615441688872198U, (uint64_t)1926435616702564U, (uint64_t)235632180396480U,
-    (uint64_t)1051918343571810U, (uint64_t)2150570051687050U, (uint64_t)879198845408738U,
-    (uint64_t)1443966275205464U, (uint64_t)481362545245088U, (uint64_t)512807443532642U,
-    (uint64_t)641147578283480U, (uint64_t)1594276116945596U, (uint64_t)1844812743300602U,
-    (uint64_t)2044559316019485U, (uint64_t)202620777969020U, (uint64_t)852992984136302U,
-    (uint64_t)1500869642692910U, (uint64_t)1085216217052457U, (uint64_t)1736294372259758U,
-    (uint64_t)2009666354486552U, (uint64_t)1262389020715248U, (uint64_t)1166527705256867U,
-    (uint64_t)1409917450806036U, (uint64_t)1705819160057637U, (uint64_t)1116901782584378U,
-    (uint64_t)1278460472285473U, (uint64_t)257879811360157U, (uint64_t)40314007176886U,
-    (uint64_t)701309846749639U, (uint64_t)1380457676672777U, (uint64_t)631519782380272U,
-    (uint64_t)1196339573466793U, (uint64_t)955537708940017U, (uint64_t)532725633381530U,
-    (uint64_t)641190593731833U, (uint64_t)7214357153807U, (uint64_t)481922072107983U,
-    (uint64_t)1634886189207352U, (uint64_t)1247659758261633U, (uint64_t)1655809614786430U,
-    (uint64_t)43105797900223U, (uint64_t)76205809912607U, (uint64_t)1936575107455823U,
-    (uint64_t)1107927314642236U, (uint64_t)2199986333469333U, (uint64_t)802974829322510U,
-    (uint64_t)718173128143482U, (uint64_t)539385184235615U, (uint64_t)2075693785611221U,
-    (uint64_t)953281147333690U, (uint64_t)1623571637172587U, (uint64_t)655274535022250U,
-    (uint64_t)1568078078819021U, (uint64_t)101142125049712U, (uint64_t)1488441673350881U,
-    (uint64_t)1457969561944515U, (uint64_t)1492622544287712U, (uint64_t)2041460689280803U,
-    (uint64_t)1961848091392887U, (uint64_t)461003520846938U, (uint64_t)934728060399807U,
-    (uint64_t)117723291519705U, (uint64_t)1027773762863526U, (uint64_t)56765304991567U,
-    (uint64_t)2184028379550479U, (uint64_t)1768767711894030U, (uint64_t)1304432068983172U,
-    (uint64_t)498080974452325U, (uint64_t)2134905654858163U, (uint64_t)1446137427202647U,
-    (uint64_t)551613831549590U, (uint64_t)680288767054205U, (uint64_t)1278113339140386U,
-    (uint64_t)378149431842614U, (uint64_t)80520494426960U, (uint64_t)2080985256348782U,
-    (uint64_t)673432591799820U, (uint64_t)739189463724560U, (uint64_t)1847191452197509U,
-    (uint64_t)527737312871602U, (uint64_t)477609358840073U, (uint64_t)1891633072677946U,
-    (uint64_t)1841456828278466U, (uint64_t)2242502936489002U, (uint64_t)524791829362709U,
-    (uint64_t)276648168514036U, (uint64_t)991706903257619U, (uint64_t)512580228297906U,
-    (uint64_t)1216855104975946U, (uint64_t)67030930303149U, (uint64_t)769593945208213U,
-    (uint64_t)2048873385103577U, (uint64_t)455635274123107U, (uint64_t)2077404927176696U,
-    (uint64_t)1803539634652306U, (uint64_t)1837579953843417U, (uint64_t)1564240068662828U,
-    (uint64_t)1964310918970435U, (uint64_t)832822906252492U, (uint64_t)1516044634195010U,
-    (uint64_t)770571447506889U, (uint64_t)602215152486818U, (uint64_t)1760828333136947U,
-    (uint64_t)730156776030376U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 557549315715710ULL, 196756086293855ULL, 846062225082495ULL,
+    1865068224838092ULL, 991112090754908ULL, 522916421512828ULL, 2098523346722375ULL,
+    1135633221747012ULL, 858420432114866ULL, 186358544306082ULL, 1044420411868480ULL,
+    2080052304349321ULL, 557301814716724ULL, 1305130257814057ULL, 2126012765451197ULL,
+    1441004402875101ULL, 353948968859203ULL, 470765987164835ULL, 1507675957683570ULL,
+    1086650358745097ULL, 1911913434398388ULL, 66086091117182ULL, 1137511952425971ULL,
+    36958263512141ULL, 2193310025325256ULL, 1085191426269045ULL, 1232148267909446ULL,
+    1449894406170117ULL, 1241416717139557ULL, 1940876999212868ULL, 829758415918121ULL,
+    309608450373449ULL, 2228398547683851ULL, 1580623271960188ULL, 1675601502456740ULL,
+    1360363115493548ULL, 1098397313096815ULL, 1809255384359797ULL, 1458261916834384ULL,
+    210682545649705ULL, 1606836641068115ULL, 1230478270405318ULL, 1843192771547802ULL,
+    1794596343564051ULL, 229060710252162ULL, 2169742775467181ULL, 701467067318072ULL,
+    696018499035555ULL, 521051885339807ULL, 158329567901874ULL, 740426481832143ULL,
+    1369811177301441ULL, 503351589084015ULL, 1781114827942261ULL, 1650493549693035ULL,
+    2174562418345156ULL, 456517194809244ULL, 2052761522121179ULL, 2233342271123682ULL,
+    1445872925177435ULL, 1131882576902813ULL, 220765848055241ULL, 1280259961403769ULL,
+    1581497080160712ULL, 1477441080108824ULL, 218428165202767ULL, 1970598141278907ULL,
+    643366736173069ULL, 2167909426804014ULL, 834993711408259ULL, 1922437166463212ULL,
+    1900036281472252ULL, 513794844386304ULL, 1297904164900114ULL, 1147626295373268ULL,
+    1910101606251299ULL, 182933838633381ULL, 806229530787362ULL, 155511666433200ULL,
+    290522463375462ULL, 534373523491751ULL, 1302938814480515ULL, 1664979184120445ULL,
+    304235649499423ULL, 339284524318609ULL, 1881717946973483ULL, 1670802286833842ULL,
+    2223637120675737ULL, 135818919485814ULL, 1144856572842792ULL, 2234981613434386ULL,
+    963917024969826ULL, 402275378284993ULL, 141532417412170ULL, 921537468739387ULL,
+    963905069722607ULL, 1405442890733358ULL, 1567763927164655ULL, 1664776329195930ULL,
+    2095924165508507ULL, 994243110271379ULL, 1243925610609353ULL, 1029845815569727ULL,
+    1001968867985629ULL, 170368934002484ULL, 1100906131583801ULL, 1825190326449569ULL,
+    1462285121182096ULL, 1545240767016377ULL, 797859025652273ULL, 1062758326657530ULL,
+    1125600735118266ULL, 739325756774527ULL, 1420144485966996ULL, 1915492743426702ULL,
+    752968196344993ULL, 882156396938351ULL, 1909097048763227ULL, 849058590685611ULL,
+    840754951388500ULL, 1832926948808323ULL, 2023317100075297ULL, 322382745442827ULL,
+    1569741341737601ULL, 1678986113194987ULL, 757598994581938ULL, 29678659580705ULL,
+    1239680935977986ULL, 1509239427168474ULL, 1055981929287006ULL, 1894085471158693ULL,
+    916486225488490ULL, 642168890366120ULL, 300453362620010ULL, 1858797242721481ULL,
+    2077989823177130ULL, 510228455273334ULL, 1473284798689270ULL, 5173934574301ULL,
+    765285232030050ULL, 1007154707631065ULL, 1862128712885972ULL, 168873464821340ULL,
+    1967853269759318ULL, 1489896018263031ULL, 592451806166369ULL, 1242298565603883ULL,
+    1838918921339058ULL, 697532763910695ULL, 294335466239059ULL, 135687058387449ULL,
+    2133734403874176ULL, 2121911143127699ULL, 20222476737364ULL, 1200824626476747ULL,
+    1397731736540791ULL, 702378430231418ULL, 59059527640068ULL, 460992547183981ULL,
+    1016125857842765ULL, 1273530839608957ULL, 96724128829301ULL, 1313433042425233ULL,
+    3543822857227ULL, 761975685357118ULL, 110417360745248ULL, 1079634164577663ULL,
+    2044574510020457ULL, 338709058603120ULL, 94541336042799ULL, 127963233585039ULL,
+    94427896272258ULL, 1143501979342182ULL, 1217958006212230ULL, 2153887831492134ULL,
+    1519219513255575ULL, 251793195454181ULL, 392517349345200ULL, 1507033011868881ULL,
+    2208494254670752ULL, 1364389582694359ULL, 2214069430728063ULL, 1272814257105752ULL,
+    741450148906352ULL, 1105776675555685ULL, 824447222014984ULL, 528745219306376ULL,
+    589427609121575ULL, 1501786838809155ULL, 379067373073147ULL, 184909476589356ULL,
+    1346887560616185ULL, 1932023742314082ULL, 1633302311869264ULL, 1685314821133069ULL,
+    1836610282047884ULL, 1595571594397150ULL, 615441688872198ULL, 1926435616702564ULL,
+    235632180396480ULL, 1051918343571810ULL, 2150570051687050ULL, 879198845408738ULL,
+    1443966275205464ULL, 481362545245088ULL, 512807443532642ULL, 641147578283480ULL,
+    1594276116945596ULL, 1844812743300602ULL, 2044559316019485ULL, 202620777969020ULL,
+    852992984136302ULL, 1500869642692910ULL, 1085216217052457ULL, 1736294372259758ULL,
+    2009666354486552ULL, 1262389020715248ULL, 1166527705256867ULL, 1409917450806036ULL,
+    1705819160057637ULL, 1116901782584378ULL, 1278460472285473ULL, 257879811360157ULL,
+    40314007176886ULL, 701309846749639ULL, 1380457676672777ULL, 631519782380272ULL,
+    1196339573466793ULL, 955537708940017ULL, 532725633381530ULL, 641190593731833ULL,
+    7214357153807ULL, 481922072107983ULL, 1634886189207352ULL, 1247659758261633ULL,
+    1655809614786430ULL, 43105797900223ULL, 76205809912607ULL, 1936575107455823ULL,
+    1107927314642236ULL, 2199986333469333ULL, 802974829322510ULL, 718173128143482ULL,
+    539385184235615ULL, 2075693785611221ULL, 953281147333690ULL, 1623571637172587ULL,
+    655274535022250ULL, 1568078078819021ULL, 101142125049712ULL, 1488441673350881ULL,
+    1457969561944515ULL, 1492622544287712ULL, 2041460689280803ULL, 1961848091392887ULL,
+    461003520846938ULL, 934728060399807ULL, 117723291519705ULL, 1027773762863526ULL,
+    56765304991567ULL, 2184028379550479ULL, 1768767711894030ULL, 1304432068983172ULL,
+    498080974452325ULL, 2134905654858163ULL, 1446137427202647ULL, 551613831549590ULL,
+    680288767054205ULL, 1278113339140386ULL, 378149431842614ULL, 80520494426960ULL,
+    2080985256348782ULL, 673432591799820ULL, 739189463724560ULL, 1847191452197509ULL,
+    527737312871602ULL, 477609358840073ULL, 1891633072677946ULL, 1841456828278466ULL,
+    2242502936489002ULL, 524791829362709ULL, 276648168514036ULL, 991706903257619ULL,
+    512580228297906ULL, 1216855104975946ULL, 67030930303149ULL, 769593945208213ULL,
+    2048873385103577ULL, 455635274123107ULL, 2077404927176696ULL, 1803539634652306ULL,
+    1837579953843417ULL, 1564240068662828ULL, 1964310918970435ULL, 832822906252492ULL,
+    1516044634195010ULL, 770571447506889ULL, 602215152486818ULL, 1760828333136947ULL,
+    730156776030376ULL
   };
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_g_pow2_192_table_w4[320U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)1129953239743101U, (uint64_t)1240339163956160U,
-    (uint64_t)61002583352401U, (uint64_t)2017604552196030U, (uint64_t)1576867829229863U,
-    (uint64_t)1508654942849389U, (uint64_t)270111619664077U, (uint64_t)1253097517254054U,
-    (uint64_t)721798270973250U, (uint64_t)161923365415298U, (uint64_t)828530877526011U,
-    (uint64_t)1494851059386763U, (uint64_t)662034171193976U, (uint64_t)1315349646974670U,
-    (uint64_t)2199229517308806U, (uint64_t)497078277852673U, (uint64_t)1310507715989956U,
-    (uint64_t)1881315714002105U, (uint64_t)2214039404983803U, (uint64_t)1331036420272667U,
-    (uint64_t)296286697520787U, (uint64_t)1179367922639127U, (uint64_t)25348441419697U,
-    (uint64_t)2200984961703188U, (uint64_t)150893128908291U, (uint64_t)1978614888570852U,
-    (uint64_t)1539657347172046U, (uint64_t)553810196523619U, (uint64_t)246017573977646U,
-    (uint64_t)1440448985385485U, (uint64_t)346049108099981U, (uint64_t)601166606218546U,
-    (uint64_t)855822004151713U, (uint64_t)1957521326383188U, (uint64_t)1114240380430887U,
-    (uint64_t)1349639675122048U, (uint64_t)957375954499040U, (uint64_t)111551795360136U,
-    (uint64_t)618586733648988U, (uint64_t)490708840688866U, (uint64_t)1267002049697314U,
-    (uint64_t)1130723224930028U, (uint64_t)215603029480828U, (uint64_t)1277138555414710U,
-    (uint64_t)1556750324971322U, (uint64_t)1407903521793741U, (uint64_t)1836836546590749U,
-    (uint64_t)576500297444199U, (uint64_t)2074707599091135U, (uint64_t)1826239864380012U,
-    (uint64_t)1935365705983312U, (uint64_t)239501825683682U, (uint64_t)1594236669034980U,
-    (uint64_t)1283078975055301U, (uint64_t)856745636255925U, (uint64_t)1342128647959981U,
-    (uint64_t)945216428379689U, (uint64_t)938746202496410U, (uint64_t)105775123333919U,
-    (uint64_t)1379852610117266U, (uint64_t)1770216827500275U, (uint64_t)1016017267535704U,
-    (uint64_t)1902885522469532U, (uint64_t)994184703730489U, (uint64_t)2227487538793763U,
-    (uint64_t)53155967096055U, (uint64_t)1264120808114350U, (uint64_t)1334928769376729U,
-    (uint64_t)393911808079997U, (uint64_t)826229239481845U, (uint64_t)1827903006733192U,
-    (uint64_t)1449283706008465U, (uint64_t)1258040415217849U, (uint64_t)1641484112868370U,
-    (uint64_t)1140150841968176U, (uint64_t)391113338021313U, (uint64_t)162138667815833U,
-    (uint64_t)742204396566060U, (uint64_t)110709233440557U, (uint64_t)90179377432917U,
-    (uint64_t)530511949644489U, (uint64_t)911568635552279U, (uint64_t)135869304780166U,
-    (uint64_t)617719999563692U, (uint64_t)1802525001631319U, (uint64_t)1836394639510490U,
-    (uint64_t)1862739456475085U, (uint64_t)1378284444664288U, (uint64_t)1617882529391756U,
-    (uint64_t)876124429891172U, (uint64_t)1147654641445091U, (uint64_t)1476943370400542U,
-    (uint64_t)688601222759067U, (uint64_t)2120281968990205U, (uint64_t)1387113236912611U,
-    (uint64_t)2125245820685788U, (uint64_t)1030674016350092U, (uint64_t)1594684598654247U,
-    (uint64_t)1165939511879820U, (uint64_t)271499323244173U, (uint64_t)546587254515484U,
-    (uint64_t)945603425742936U, (uint64_t)1242252568170226U, (uint64_t)561598728058142U,
-    (uint64_t)604827091794712U, (uint64_t)19869753585186U, (uint64_t)565367744708915U,
-    (uint64_t)536755754533603U, (uint64_t)1767258313589487U, (uint64_t)907952975936127U,
-    (uint64_t)292851652613937U, (uint64_t)163573546237963U, (uint64_t)837601408384564U,
-    (uint64_t)591996990118301U, (uint64_t)2126051747693057U, (uint64_t)182247548824566U,
-    (uint64_t)908369044122868U, (uint64_t)1335442699947273U, (uint64_t)2234292296528612U,
-    (uint64_t)689537529333034U, (uint64_t)2174778663790714U, (uint64_t)1011407643592667U,
-    (uint64_t)1856130618715473U, (uint64_t)1557437221651741U, (uint64_t)2250285407006102U,
-    (uint64_t)1412384213410827U, (uint64_t)1428042038612456U, (uint64_t)962709733973660U,
-    (uint64_t)313995703125919U, (uint64_t)1844969155869325U, (uint64_t)787716782673657U,
-    (uint64_t)622504542173478U, (uint64_t)930119043384654U, (uint64_t)2128870043952488U,
-    (uint64_t)537781531479523U, (uint64_t)1556666269904940U, (uint64_t)417333635741346U,
-    (uint64_t)1986743846438415U, (uint64_t)877620478041197U, (uint64_t)2205624582983829U,
-    (uint64_t)595260668884488U, (uint64_t)2025159350373157U, (uint64_t)2091659716088235U,
-    (uint64_t)1423634716596391U, (uint64_t)653686638634080U, (uint64_t)1972388399989956U,
-    (uint64_t)795575741798014U, (uint64_t)889240107997846U, (uint64_t)1446156876910732U,
-    (uint64_t)1028507012221776U, (uint64_t)1071697574586478U, (uint64_t)1689630411899691U,
-    (uint64_t)604092816502174U, (uint64_t)1909917373896122U, (uint64_t)1602544877643837U,
-    (uint64_t)1227177032923867U, (uint64_t)62684197535630U, (uint64_t)186146290753883U,
-    (uint64_t)414449055316766U, (uint64_t)1560555880866750U, (uint64_t)157579947096755U,
-    (uint64_t)230526795502384U, (uint64_t)1197673369665894U, (uint64_t)593779215869037U,
-    (uint64_t)214638834474097U, (uint64_t)1796344443484478U, (uint64_t)493550548257317U,
-    (uint64_t)1628442824033694U, (uint64_t)1410811655893495U, (uint64_t)1009361960995171U,
-    (uint64_t)604736219740352U, (uint64_t)392445928555351U, (uint64_t)1254295770295706U,
-    (uint64_t)1958074535046128U, (uint64_t)508699942241019U, (uint64_t)739405911261325U,
-    (uint64_t)1678760393882409U, (uint64_t)517763708545996U, (uint64_t)640040257898722U,
-    (uint64_t)384966810872913U, (uint64_t)407454748380128U, (uint64_t)152604679407451U,
-    (uint64_t)185102854927662U, (uint64_t)1448175503649595U, (uint64_t)100328519208674U,
-    (uint64_t)1153263667012830U, (uint64_t)1643926437586490U, (uint64_t)609632142834154U,
-    (uint64_t)980984004749261U, (uint64_t)855290732258779U, (uint64_t)2186022163021506U,
-    (uint64_t)1254052618626070U, (uint64_t)1850030517182611U, (uint64_t)162348933090207U,
-    (uint64_t)1948712273679932U, (uint64_t)1331832516262191U, (uint64_t)1219400369175863U,
-    (uint64_t)89689036937483U, (uint64_t)1554886057235815U, (uint64_t)1520047528432789U,
-    (uint64_t)81263957652811U, (uint64_t)146612464257008U, (uint64_t)2207945627164163U,
-    (uint64_t)919846660682546U, (uint64_t)1925694087906686U, (uint64_t)2102027292388012U,
-    (uint64_t)887992003198635U, (uint64_t)1817924871537027U, (uint64_t)746660005584342U,
-    (uint64_t)753757153275525U, (uint64_t)91394270908699U, (uint64_t)511837226544151U,
-    (uint64_t)736341543649373U, (uint64_t)1256371121466367U, (uint64_t)1977778299551813U,
-    (uint64_t)817915174462263U, (uint64_t)1602323381418035U, (uint64_t)190035164572930U,
-    (uint64_t)603796401391181U, (uint64_t)2152666873671669U, (uint64_t)1813900316324112U,
-    (uint64_t)1292622433358041U, (uint64_t)888439870199892U, (uint64_t)978918155071994U,
-    (uint64_t)534184417909805U, (uint64_t)466460084317313U, (uint64_t)1275223140288685U,
-    (uint64_t)786407043883517U, (uint64_t)1620520623925754U, (uint64_t)1753625021290269U,
-    (uint64_t)751937175104525U, (uint64_t)905301961820613U, (uint64_t)697059847245437U,
-    (uint64_t)584919033981144U, (uint64_t)1272165506533156U, (uint64_t)1532180021450866U,
-    (uint64_t)1901407354005301U, (uint64_t)1421319720492586U, (uint64_t)2179081609765456U,
-    (uint64_t)2193253156667632U, (uint64_t)1080248329608584U, (uint64_t)2158422436462066U,
-    (uint64_t)759167597017850U, (uint64_t)545759071151285U, (uint64_t)641600428493698U,
-    (uint64_t)943791424499848U, (uint64_t)469571542427864U, (uint64_t)951117845222467U,
-    (uint64_t)1780538594373407U, (uint64_t)614611122040309U, (uint64_t)1354826131886963U,
-    (uint64_t)221898131992340U, (uint64_t)1145699723916219U, (uint64_t)798735379961769U,
-    (uint64_t)1843560518208287U, (uint64_t)1424523160161545U, (uint64_t)205549016574779U,
-    (uint64_t)2239491587362749U, (uint64_t)1918363582399888U, (uint64_t)1292183072788455U,
-    (uint64_t)1783513123192567U, (uint64_t)1584027954317205U, (uint64_t)1890421443925740U,
-    (uint64_t)1718459319874929U, (uint64_t)1522091040748809U, (uint64_t)399467600667219U,
-    (uint64_t)1870973059066576U, (uint64_t)287514433150348U, (uint64_t)1397845311152885U,
-    (uint64_t)1880440629872863U, (uint64_t)709302939340341U, (uint64_t)1813571361109209U,
-    (uint64_t)86598795876860U, (uint64_t)1146964554310612U, (uint64_t)1590956584862432U,
-    (uint64_t)2097004628155559U, (uint64_t)656227622102390U, (uint64_t)1808500445541891U,
-    (uint64_t)958336726523135U, (uint64_t)2007604569465975U, (uint64_t)313504950390997U,
-    (uint64_t)1399686004953620U, (uint64_t)1759732788465234U, (uint64_t)1562539721055836U,
-    (uint64_t)1575722765016293U, (uint64_t)793318366641259U, (uint64_t)443876859384887U,
-    (uint64_t)547308921989704U, (uint64_t)636698687503328U, (uint64_t)2179175835287340U,
-    (uint64_t)498333551718258U, (uint64_t)932248760026176U, (uint64_t)1612395686304653U,
-    (uint64_t)2179774103745626U, (uint64_t)1359658123541018U, (uint64_t)171488501802442U,
-    (uint64_t)1625034951791350U, (uint64_t)520196922773633U, (uint64_t)1873787546341877U,
-    (uint64_t)303457823885368U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1129953239743101ULL, 1240339163956160ULL, 61002583352401ULL,
+    2017604552196030ULL, 1576867829229863ULL, 1508654942849389ULL, 270111619664077ULL,
+    1253097517254054ULL, 721798270973250ULL, 161923365415298ULL, 828530877526011ULL,
+    1494851059386763ULL, 662034171193976ULL, 1315349646974670ULL, 2199229517308806ULL,
+    497078277852673ULL, 1310507715989956ULL, 1881315714002105ULL, 2214039404983803ULL,
+    1331036420272667ULL, 296286697520787ULL, 1179367922639127ULL, 25348441419697ULL,
+    2200984961703188ULL, 150893128908291ULL, 1978614888570852ULL, 1539657347172046ULL,
+    553810196523619ULL, 246017573977646ULL, 1440448985385485ULL, 346049108099981ULL,
+    601166606218546ULL, 855822004151713ULL, 1957521326383188ULL, 1114240380430887ULL,
+    1349639675122048ULL, 957375954499040ULL, 111551795360136ULL, 618586733648988ULL,
+    490708840688866ULL, 1267002049697314ULL, 1130723224930028ULL, 215603029480828ULL,
+    1277138555414710ULL, 1556750324971322ULL, 1407903521793741ULL, 1836836546590749ULL,
+    576500297444199ULL, 2074707599091135ULL, 1826239864380012ULL, 1935365705983312ULL,
+    239501825683682ULL, 1594236669034980ULL, 1283078975055301ULL, 856745636255925ULL,
+    1342128647959981ULL, 945216428379689ULL, 938746202496410ULL, 105775123333919ULL,
+    1379852610117266ULL, 1770216827500275ULL, 1016017267535704ULL, 1902885522469532ULL,
+    994184703730489ULL, 2227487538793763ULL, 53155967096055ULL, 1264120808114350ULL,
+    1334928769376729ULL, 393911808079997ULL, 826229239481845ULL, 1827903006733192ULL,
+    1449283706008465ULL, 1258040415217849ULL, 1641484112868370ULL, 1140150841968176ULL,
+    391113338021313ULL, 162138667815833ULL, 742204396566060ULL, 110709233440557ULL,
+    90179377432917ULL, 530511949644489ULL, 911568635552279ULL, 135869304780166ULL,
+    617719999563692ULL, 1802525001631319ULL, 1836394639510490ULL, 1862739456475085ULL,
+    1378284444664288ULL, 1617882529391756ULL, 876124429891172ULL, 1147654641445091ULL,
+    1476943370400542ULL, 688601222759067ULL, 2120281968990205ULL, 1387113236912611ULL,
+    2125245820685788ULL, 1030674016350092ULL, 1594684598654247ULL, 1165939511879820ULL,
+    271499323244173ULL, 546587254515484ULL, 945603425742936ULL, 1242252568170226ULL,
+    561598728058142ULL, 604827091794712ULL, 19869753585186ULL, 565367744708915ULL,
+    536755754533603ULL, 1767258313589487ULL, 907952975936127ULL, 292851652613937ULL,
+    163573546237963ULL, 837601408384564ULL, 591996990118301ULL, 2126051747693057ULL,
+    182247548824566ULL, 908369044122868ULL, 1335442699947273ULL, 2234292296528612ULL,
+    689537529333034ULL, 2174778663790714ULL, 1011407643592667ULL, 1856130618715473ULL,
+    1557437221651741ULL, 2250285407006102ULL, 1412384213410827ULL, 1428042038612456ULL,
+    962709733973660ULL, 313995703125919ULL, 1844969155869325ULL, 787716782673657ULL,
+    622504542173478ULL, 930119043384654ULL, 2128870043952488ULL, 537781531479523ULL,
+    1556666269904940ULL, 417333635741346ULL, 1986743846438415ULL, 877620478041197ULL,
+    2205624582983829ULL, 595260668884488ULL, 2025159350373157ULL, 2091659716088235ULL,
+    1423634716596391ULL, 653686638634080ULL, 1972388399989956ULL, 795575741798014ULL,
+    889240107997846ULL, 1446156876910732ULL, 1028507012221776ULL, 1071697574586478ULL,
+    1689630411899691ULL, 604092816502174ULL, 1909917373896122ULL, 1602544877643837ULL,
+    1227177032923867ULL, 62684197535630ULL, 186146290753883ULL, 414449055316766ULL,
+    1560555880866750ULL, 157579947096755ULL, 230526795502384ULL, 1197673369665894ULL,
+    593779215869037ULL, 214638834474097ULL, 1796344443484478ULL, 493550548257317ULL,
+    1628442824033694ULL, 1410811655893495ULL, 1009361960995171ULL, 604736219740352ULL,
+    392445928555351ULL, 1254295770295706ULL, 1958074535046128ULL, 508699942241019ULL,
+    739405911261325ULL, 1678760393882409ULL, 517763708545996ULL, 640040257898722ULL,
+    384966810872913ULL, 407454748380128ULL, 152604679407451ULL, 185102854927662ULL,
+    1448175503649595ULL, 100328519208674ULL, 1153263667012830ULL, 1643926437586490ULL,
+    609632142834154ULL, 980984004749261ULL, 855290732258779ULL, 2186022163021506ULL,
+    1254052618626070ULL, 1850030517182611ULL, 162348933090207ULL, 1948712273679932ULL,
+    1331832516262191ULL, 1219400369175863ULL, 89689036937483ULL, 1554886057235815ULL,
+    1520047528432789ULL, 81263957652811ULL, 146612464257008ULL, 2207945627164163ULL,
+    919846660682546ULL, 1925694087906686ULL, 2102027292388012ULL, 887992003198635ULL,
+    1817924871537027ULL, 746660005584342ULL, 753757153275525ULL, 91394270908699ULL,
+    511837226544151ULL, 736341543649373ULL, 1256371121466367ULL, 1977778299551813ULL,
+    817915174462263ULL, 1602323381418035ULL, 190035164572930ULL, 603796401391181ULL,
+    2152666873671669ULL, 1813900316324112ULL, 1292622433358041ULL, 888439870199892ULL,
+    978918155071994ULL, 534184417909805ULL, 466460084317313ULL, 1275223140288685ULL,
+    786407043883517ULL, 1620520623925754ULL, 1753625021290269ULL, 751937175104525ULL,
+    905301961820613ULL, 697059847245437ULL, 584919033981144ULL, 1272165506533156ULL,
+    1532180021450866ULL, 1901407354005301ULL, 1421319720492586ULL, 2179081609765456ULL,
+    2193253156667632ULL, 1080248329608584ULL, 2158422436462066ULL, 759167597017850ULL,
+    545759071151285ULL, 641600428493698ULL, 943791424499848ULL, 469571542427864ULL,
+    951117845222467ULL, 1780538594373407ULL, 614611122040309ULL, 1354826131886963ULL,
+    221898131992340ULL, 1145699723916219ULL, 798735379961769ULL, 1843560518208287ULL,
+    1424523160161545ULL, 205549016574779ULL, 2239491587362749ULL, 1918363582399888ULL,
+    1292183072788455ULL, 1783513123192567ULL, 1584027954317205ULL, 1890421443925740ULL,
+    1718459319874929ULL, 1522091040748809ULL, 399467600667219ULL, 1870973059066576ULL,
+    287514433150348ULL, 1397845311152885ULL, 1880440629872863ULL, 709302939340341ULL,
+    1813571361109209ULL, 86598795876860ULL, 1146964554310612ULL, 1590956584862432ULL,
+    2097004628155559ULL, 656227622102390ULL, 1808500445541891ULL, 958336726523135ULL,
+    2007604569465975ULL, 313504950390997ULL, 1399686004953620ULL, 1759732788465234ULL,
+    1562539721055836ULL, 1575722765016293ULL, 793318366641259ULL, 443876859384887ULL,
+    547308921989704ULL, 636698687503328ULL, 2179175835287340ULL, 498333551718258ULL,
+    932248760026176ULL, 1612395686304653ULL, 2179774103745626ULL, 1359658123541018ULL,
+    171488501802442ULL, 1625034951791350ULL, 520196922773633ULL, 1873787546341877ULL,
+    303457823885368ULL
   };
 
 static const
 uint64_t
 Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5[640U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)1738742601995546U, (uint64_t)1146398526822698U,
-    (uint64_t)2070867633025821U, (uint64_t)562264141797630U, (uint64_t)587772402128613U,
-    (uint64_t)1801439850948184U, (uint64_t)1351079888211148U, (uint64_t)450359962737049U,
-    (uint64_t)900719925474099U, (uint64_t)1801439850948198U, (uint64_t)1U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1841354044333475U,
-    (uint64_t)16398895984059U, (uint64_t)755974180946558U, (uint64_t)900171276175154U,
-    (uint64_t)1821297809914039U, (uint64_t)1661154287933054U, (uint64_t)284530020860578U,
-    (uint64_t)1390261174866914U, (uint64_t)1524110943907984U, (uint64_t)1045603498418422U,
-    (uint64_t)928651508580478U, (uint64_t)1383326941296346U, (uint64_t)961937908925785U,
-    (uint64_t)80455759693706U, (uint64_t)904734540352947U, (uint64_t)1507481815385608U,
-    (uint64_t)2223447444246085U, (uint64_t)1083941587175919U, (uint64_t)2059929906842505U,
-    (uint64_t)1581435440146976U, (uint64_t)782730187692425U, (uint64_t)9928394897574U,
-    (uint64_t)1539449519985236U, (uint64_t)1923587931078510U, (uint64_t)552919286076056U,
-    (uint64_t)376925408065760U, (uint64_t)447320488831784U, (uint64_t)1362918338468019U,
-    (uint64_t)1470031896696846U, (uint64_t)2189796996539902U, (uint64_t)1337552949959847U,
-    (uint64_t)1762287177775726U, (uint64_t)237994495816815U, (uint64_t)1277840395970544U,
-    (uint64_t)543972849007241U, (uint64_t)1224692671618814U, (uint64_t)162359533289271U,
-    (uint64_t)282240927125249U, (uint64_t)586909166382289U, (uint64_t)17726488197838U,
-    (uint64_t)377014554985659U, (uint64_t)1433835303052512U, (uint64_t)702061469493692U,
-    (uint64_t)1142253108318154U, (uint64_t)318297794307551U, (uint64_t)954362646308543U,
-    (uint64_t)517363881452320U, (uint64_t)1868013482130416U, (uint64_t)262562472373260U,
-    (uint64_t)902232853249919U, (uint64_t)2107343057055746U, (uint64_t)462368348619024U,
-    (uint64_t)1893758677092974U, (uint64_t)2177729767846389U, (uint64_t)2168532543559143U,
-    (uint64_t)443867094639821U, (uint64_t)730169342581022U, (uint64_t)1564589016879755U,
-    (uint64_t)51218195700649U, (uint64_t)76684578423745U, (uint64_t)560266272480743U,
-    (uint64_t)922517457707697U, (uint64_t)2066645939860874U, (uint64_t)1318277348414638U,
-    (uint64_t)1576726809084003U, (uint64_t)1817337608563665U, (uint64_t)1874240939237666U,
-    (uint64_t)754733726333910U, (uint64_t)97085310406474U, (uint64_t)751148364309235U,
-    (uint64_t)1622159695715187U, (uint64_t)1444098819684916U, (uint64_t)130920805558089U,
-    (uint64_t)1260449179085308U, (uint64_t)1860021740768461U, (uint64_t)110052860348509U,
-    (uint64_t)193830891643810U, (uint64_t)164148413933881U, (uint64_t)180017794795332U,
-    (uint64_t)1523506525254651U, (uint64_t)465981629225956U, (uint64_t)559733514964572U,
-    (uint64_t)1279624874416974U, (uint64_t)2026642326892306U, (uint64_t)1425156829982409U,
-    (uint64_t)2160936383793147U, (uint64_t)1061870624975247U, (uint64_t)2023497043036941U,
-    (uint64_t)117942212883190U, (uint64_t)490339622800774U, (uint64_t)1729931303146295U,
-    (uint64_t)422305932971074U, (uint64_t)529103152793096U, (uint64_t)1211973233775992U,
-    (uint64_t)721364955929681U, (uint64_t)1497674430438813U, (uint64_t)342545521275073U,
-    (uint64_t)2102107575279372U, (uint64_t)2108462244669966U, (uint64_t)1382582406064082U,
-    (uint64_t)2206396818383323U, (uint64_t)2109093268641147U, (uint64_t)10809845110983U,
-    (uint64_t)1605176920880099U, (uint64_t)744640650753946U, (uint64_t)1712758897518129U,
-    (uint64_t)373410811281809U, (uint64_t)648838265800209U, (uint64_t)813058095530999U,
-    (uint64_t)513987632620169U, (uint64_t)465516160703329U, (uint64_t)2136322186126330U,
-    (uint64_t)1979645899422932U, (uint64_t)1197131006470786U, (uint64_t)1467836664863979U,
-    (uint64_t)1340751381374628U, (uint64_t)1810066212667962U, (uint64_t)1009933588225499U,
-    (uint64_t)1106129188080873U, (uint64_t)1388980405213901U, (uint64_t)533719246598044U,
-    (uint64_t)1169435803073277U, (uint64_t)198920999285821U, (uint64_t)487492330629854U,
-    (uint64_t)1807093008537778U, (uint64_t)1540899012923865U, (uint64_t)2075080271659867U,
-    (uint64_t)1527990806921523U, (uint64_t)1323728742908002U, (uint64_t)1568595959608205U,
-    (uint64_t)1388032187497212U, (uint64_t)2026968840050568U, (uint64_t)1396591153295755U,
-    (uint64_t)820416950170901U, (uint64_t)520060313205582U, (uint64_t)2016404325094901U,
-    (uint64_t)1584709677868520U, (uint64_t)272161374469956U, (uint64_t)1567188603996816U,
-    (uint64_t)1986160530078221U, (uint64_t)553930264324589U, (uint64_t)1058426729027503U,
-    (uint64_t)8762762886675U, (uint64_t)2216098143382988U, (uint64_t)1835145266889223U,
-    (uint64_t)1712936431558441U, (uint64_t)1017009937844974U, (uint64_t)585361667812740U,
-    (uint64_t)2114711541628181U, (uint64_t)2238729632971439U, (uint64_t)121257546253072U,
-    (uint64_t)847154149018345U, (uint64_t)211972965476684U, (uint64_t)287499084460129U,
-    (uint64_t)2098247259180197U, (uint64_t)839070411583329U, (uint64_t)339551619574372U,
-    (uint64_t)1432951287640743U, (uint64_t)526481249498942U, (uint64_t)931991661905195U,
-    (uint64_t)1884279965674487U, (uint64_t)200486405604411U, (uint64_t)364173020594788U,
-    (uint64_t)518034455936955U, (uint64_t)1085564703965501U, (uint64_t)16030410467927U,
-    (uint64_t)604865933167613U, (uint64_t)1695298441093964U, (uint64_t)498856548116159U,
-    (uint64_t)2193030062787034U, (uint64_t)1706339802964179U, (uint64_t)1721199073493888U,
-    (uint64_t)820740951039755U, (uint64_t)1216053436896834U, (uint64_t)23954895815139U,
-    (uint64_t)1662515208920491U, (uint64_t)1705443427511899U, (uint64_t)1957928899570365U,
-    (uint64_t)1189636258255725U, (uint64_t)1795695471103809U, (uint64_t)1691191297654118U,
-    (uint64_t)282402585374360U, (uint64_t)460405330264832U, (uint64_t)63765529445733U,
-    (uint64_t)469763447404473U, (uint64_t)733607089694996U, (uint64_t)685410420186959U,
-    (uint64_t)1096682630419738U, (uint64_t)1162548510542362U, (uint64_t)1020949526456676U,
-    (uint64_t)1211660396870573U, (uint64_t)613126398222696U, (uint64_t)1117829165843251U,
-    (uint64_t)742432540886650U, (uint64_t)1483755088010658U, (uint64_t)942392007134474U,
-    (uint64_t)1447834130944107U, (uint64_t)489368274863410U, (uint64_t)23192985544898U,
-    (uint64_t)648442406146160U, (uint64_t)785438843373876U, (uint64_t)249464684645238U,
-    (uint64_t)170494608205618U, (uint64_t)335112827260550U, (uint64_t)1462050123162735U,
-    (uint64_t)1084803668439016U, (uint64_t)853459233600325U, (uint64_t)215777728187495U,
-    (uint64_t)1965759433526974U, (uint64_t)1349482894446537U, (uint64_t)694163317612871U,
-    (uint64_t)860536766165036U, (uint64_t)1178788094084321U, (uint64_t)1652739626626996U,
-    (uint64_t)2115723946388185U, (uint64_t)1577204379094664U, (uint64_t)1083882859023240U,
-    (uint64_t)1768759143381635U, (uint64_t)1737180992507258U, (uint64_t)246054513922239U,
-    (uint64_t)577253134087234U, (uint64_t)356340280578042U, (uint64_t)1638917769925142U,
-    (uint64_t)223550348130103U, (uint64_t)470592666638765U, (uint64_t)22663573966996U,
-    (uint64_t)596552461152400U, (uint64_t)364143537069499U, (uint64_t)3942119457699U,
-    (uint64_t)107951982889287U, (uint64_t)1843471406713209U, (uint64_t)1625773041610986U,
-    (uint64_t)1466141092501702U, (uint64_t)1043024095021271U, (uint64_t)310429964047508U,
-    (uint64_t)98559121500372U, (uint64_t)152746933782868U, (uint64_t)259407205078261U,
-    (uint64_t)828123093322585U, (uint64_t)1576847274280091U, (uint64_t)1170871375757302U,
-    (uint64_t)1588856194642775U, (uint64_t)984767822341977U, (uint64_t)1141497997993760U,
-    (uint64_t)809325345150796U, (uint64_t)1879837728202511U, (uint64_t)201340910657893U,
-    (uint64_t)1079157558888483U, (uint64_t)1052373448588065U, (uint64_t)1732036202501778U,
-    (uint64_t)2105292670328445U, (uint64_t)679751387312402U, (uint64_t)1679682144926229U,
-    (uint64_t)1695823455818780U, (uint64_t)498852317075849U, (uint64_t)1786555067788433U,
-    (uint64_t)1670727545779425U, (uint64_t)117945875433544U, (uint64_t)407939139781844U,
-    (uint64_t)854632120023778U, (uint64_t)1413383148360437U, (uint64_t)286030901733673U,
-    (uint64_t)1207361858071196U, (uint64_t)461340408181417U, (uint64_t)1096919590360164U,
-    (uint64_t)1837594897475685U, (uint64_t)533755561544165U, (uint64_t)1638688042247712U,
-    (uint64_t)1431653684793005U, (uint64_t)1036458538873559U, (uint64_t)390822120341779U,
-    (uint64_t)1920929837111618U, (uint64_t)543426740024168U, (uint64_t)645751357799929U,
-    (uint64_t)2245025632994463U, (uint64_t)1550778638076452U, (uint64_t)223738153459949U,
-    (uint64_t)1337209385492033U, (uint64_t)1276967236456531U, (uint64_t)1463815821063071U,
-    (uint64_t)2070620870191473U, (uint64_t)1199170709413753U, (uint64_t)273230877394166U,
-    (uint64_t)1873264887608046U, (uint64_t)890877152910775U, (uint64_t)983226445635730U,
-    (uint64_t)44873798519521U, (uint64_t)697147127512130U, (uint64_t)961631038239304U,
-    (uint64_t)709966160696826U, (uint64_t)1706677689540366U, (uint64_t)502782733796035U,
-    (uint64_t)812545535346033U, (uint64_t)1693622521296452U, (uint64_t)1955813093002510U,
-    (uint64_t)1259937612881362U, (uint64_t)1873032503803559U, (uint64_t)1140330566016428U,
-    (uint64_t)1675726082440190U, (uint64_t)60029928909786U, (uint64_t)170335608866763U,
-    (uint64_t)766444312315022U, (uint64_t)2025049511434113U, (uint64_t)2200845622430647U,
-    (uint64_t)1201269851450408U, (uint64_t)590071752404907U, (uint64_t)1400995030286946U,
-    (uint64_t)2152637413853822U, (uint64_t)2108495473841983U, (uint64_t)3855406710349U,
-    (uint64_t)1726137673168580U, (uint64_t)51004317200100U, (uint64_t)1749082328586939U,
-    (uint64_t)1704088976144558U, (uint64_t)1977318954775118U, (uint64_t)2062602253162400U,
-    (uint64_t)948062503217479U, (uint64_t)361953965048030U, (uint64_t)1528264887238440U,
-    (uint64_t)62582552172290U, (uint64_t)2241602163389280U, (uint64_t)156385388121765U,
-    (uint64_t)2124100319761492U, (uint64_t)388928050571382U, (uint64_t)1556123596922727U,
-    (uint64_t)979310669812384U, (uint64_t)113043855206104U, (uint64_t)2023223924825469U,
-    (uint64_t)643651703263034U, (uint64_t)2234446903655540U, (uint64_t)1577241261424997U,
-    (uint64_t)860253174523845U, (uint64_t)1691026473082448U, (uint64_t)1091672764933872U,
-    (uint64_t)1957463109756365U, (uint64_t)530699502660193U, (uint64_t)349587141723569U,
-    (uint64_t)674661681919563U, (uint64_t)1633727303856240U, (uint64_t)708909037922144U,
-    (uint64_t)2160722508518119U, (uint64_t)1302188051602540U, (uint64_t)976114603845777U,
-    (uint64_t)120004758721939U, (uint64_t)1681630708873780U, (uint64_t)622274095069244U,
-    (uint64_t)1822346309016698U, (uint64_t)1100921177951904U, (uint64_t)2216952659181677U,
-    (uint64_t)1844020550362490U, (uint64_t)1976451368365774U, (uint64_t)1321101422068822U,
-    (uint64_t)1189859436282668U, (uint64_t)2008801879735257U, (uint64_t)2219413454333565U,
-    (uint64_t)424288774231098U, (uint64_t)359793146977912U, (uint64_t)270293357948703U,
-    (uint64_t)587226003677000U, (uint64_t)1482071926139945U, (uint64_t)1419630774650359U,
-    (uint64_t)1104739070570175U, (uint64_t)1662129023224130U, (uint64_t)1609203612533411U,
-    (uint64_t)1250932720691980U, (uint64_t)95215711818495U, (uint64_t)498746909028150U,
-    (uint64_t)158151296991874U, (uint64_t)1201379988527734U, (uint64_t)561599945143989U,
-    (uint64_t)2211577425617888U, (uint64_t)2166577612206324U, (uint64_t)1057590354233512U,
-    (uint64_t)1968123280416769U, (uint64_t)1316586165401313U, (uint64_t)762728164447634U,
-    (uint64_t)2045395244316047U, (uint64_t)1531796898725716U, (uint64_t)315385971670425U,
-    (uint64_t)1109421039396756U, (uint64_t)2183635256408562U, (uint64_t)1896751252659461U,
-    (uint64_t)840236037179080U, (uint64_t)796245792277211U, (uint64_t)508345890111193U,
-    (uint64_t)1275386465287222U, (uint64_t)513560822858784U, (uint64_t)1784735733120313U,
-    (uint64_t)1346467478899695U, (uint64_t)601125231208417U, (uint64_t)701076661112726U,
-    (uint64_t)1841998436455089U, (uint64_t)1156768600940434U, (uint64_t)1967853462343221U,
-    (uint64_t)2178318463061452U, (uint64_t)481885520752741U, (uint64_t)675262828640945U,
-    (uint64_t)1033539418596582U, (uint64_t)1743329872635846U, (uint64_t)159322641251283U,
-    (uint64_t)1573076470127113U, (uint64_t)954827619308195U, (uint64_t)778834750662635U,
-    (uint64_t)619912782122617U, (uint64_t)515681498488209U, (uint64_t)1675866144246843U,
-    (uint64_t)811716020969981U, (uint64_t)1125515272217398U, (uint64_t)1398917918287342U,
-    (uint64_t)1301680949183175U, (uint64_t)726474739583734U, (uint64_t)587246193475200U,
-    (uint64_t)1096581582611864U, (uint64_t)1469911826213486U, (uint64_t)1990099711206364U,
-    (uint64_t)1256496099816508U, (uint64_t)2019924615195672U, (uint64_t)1251232456707555U,
-    (uint64_t)2042971196009755U, (uint64_t)214061878479265U, (uint64_t)115385726395472U,
-    (uint64_t)1677875239524132U, (uint64_t)756888883383540U, (uint64_t)1153862117756233U,
-    (uint64_t)503391530851096U, (uint64_t)946070017477513U, (uint64_t)1878319040542579U,
-    (uint64_t)1101349418586920U, (uint64_t)793245696431613U, (uint64_t)397920495357645U,
-    (uint64_t)2174023872951112U, (uint64_t)1517867915189593U, (uint64_t)1829855041462995U,
-    (uint64_t)1046709983503619U, (uint64_t)424081940711857U, (uint64_t)2112438073094647U,
-    (uint64_t)1504338467349861U, (uint64_t)2244574127374532U, (uint64_t)2136937537441911U,
-    (uint64_t)1741150838990304U, (uint64_t)25894628400571U, (uint64_t)512213526781178U,
-    (uint64_t)1168384260796379U, (uint64_t)1424607682379833U, (uint64_t)938677789731564U,
-    (uint64_t)872882241891896U, (uint64_t)1713199397007700U, (uint64_t)1410496326218359U,
-    (uint64_t)854379752407031U, (uint64_t)465141611727634U, (uint64_t)315176937037857U,
-    (uint64_t)1020115054571233U, (uint64_t)1856290111077229U, (uint64_t)2028366269898204U,
-    (uint64_t)1432980880307543U, (uint64_t)469932710425448U, (uint64_t)581165267592247U,
-    (uint64_t)496399148156603U, (uint64_t)2063435226705903U, (uint64_t)2116841086237705U,
-    (uint64_t)498272567217048U, (uint64_t)1829438076967906U, (uint64_t)1573925801278491U,
-    (uint64_t)460763576329867U, (uint64_t)1705264723728225U, (uint64_t)999514866082412U,
-    (uint64_t)29635061779362U, (uint64_t)1884233592281020U, (uint64_t)1449755591461338U,
-    (uint64_t)42579292783222U, (uint64_t)1869504355369200U, (uint64_t)495506004805251U,
-    (uint64_t)264073104888427U, (uint64_t)2088880861028612U, (uint64_t)104646456386576U,
-    (uint64_t)1258445191399967U, (uint64_t)1348736801545799U, (uint64_t)2068276361286613U,
-    (uint64_t)884897216646374U, (uint64_t)922387476801376U, (uint64_t)1043886580402805U,
-    (uint64_t)1240883498470831U, (uint64_t)1601554651937110U, (uint64_t)804382935289482U,
-    (uint64_t)512379564477239U, (uint64_t)1466384519077032U, (uint64_t)1280698500238386U,
-    (uint64_t)211303836685749U, (uint64_t)2081725624793803U, (uint64_t)545247644516879U,
-    (uint64_t)215313359330384U, (uint64_t)286479751145614U, (uint64_t)2213650281751636U,
-    (uint64_t)2164927945999874U, (uint64_t)2072162991540882U, (uint64_t)1443769115444779U,
-    (uint64_t)1581473274363095U, (uint64_t)434633875922699U, (uint64_t)340456055781599U,
-    (uint64_t)373043091080189U, (uint64_t)839476566531776U, (uint64_t)1856706858509978U,
-    (uint64_t)931616224909153U, (uint64_t)1888181317414065U, (uint64_t)213654322650262U,
-    (uint64_t)1161078103416244U, (uint64_t)1822042328851513U, (uint64_t)915817709028812U,
-    (uint64_t)1828297056698188U, (uint64_t)1212017130909403U, (uint64_t)60258343247333U,
-    (uint64_t)342085800008230U, (uint64_t)930240559508270U, (uint64_t)1549884999174952U,
-    (uint64_t)809895264249462U, (uint64_t)184726257947682U, (uint64_t)1157065433504828U,
-    (uint64_t)1209999630381477U, (uint64_t)999920399374391U, (uint64_t)1714770150788163U,
-    (uint64_t)2026130985413228U, (uint64_t)506776632883140U, (uint64_t)1349042668246528U,
-    (uint64_t)1937232292976967U, (uint64_t)942302637530730U, (uint64_t)160211904766226U,
-    (uint64_t)1042724500438571U, (uint64_t)212454865139142U, (uint64_t)244104425172642U,
-    (uint64_t)1376990622387496U, (uint64_t)76126752421227U, (uint64_t)1027540886376422U,
-    (uint64_t)1912210655133026U, (uint64_t)13410411589575U, (uint64_t)1475856708587773U,
-    (uint64_t)615563352691682U, (uint64_t)1446629324872644U, (uint64_t)1683670301784014U,
-    (uint64_t)1049873327197127U, (uint64_t)1826401704084838U, (uint64_t)2032577048760775U,
-    (uint64_t)1922203607878853U, (uint64_t)836708788764806U, (uint64_t)2193084654695012U,
-    (uint64_t)1342923183256659U, (uint64_t)849356986294271U, (uint64_t)1228863973965618U,
-    (uint64_t)94886161081867U, (uint64_t)1423288430204892U, (uint64_t)2016167528707016U,
-    (uint64_t)1633187660972877U, (uint64_t)1550621242301752U, (uint64_t)340630244512994U,
-    (uint64_t)2103577710806901U, (uint64_t)221625016538931U, (uint64_t)421544147350960U,
-    (uint64_t)580428704555156U, (uint64_t)1479831381265617U, (uint64_t)518057926544698U,
-    (uint64_t)955027348790630U, (uint64_t)1326749172561598U, (uint64_t)1118304625755967U,
-    (uint64_t)1994005916095176U, (uint64_t)1799757332780663U, (uint64_t)751343129396941U,
-    (uint64_t)1468672898746144U, (uint64_t)1451689964451386U, (uint64_t)755070293921171U,
-    (uint64_t)904857405877052U, (uint64_t)1276087530766984U, (uint64_t)403986562858511U,
-    (uint64_t)1530661255035337U, (uint64_t)1644972908910502U, (uint64_t)1370170080438957U,
-    (uint64_t)139839536695744U, (uint64_t)909930462436512U, (uint64_t)1899999215356933U,
-    (uint64_t)635992381064566U, (uint64_t)788740975837654U, (uint64_t)224241231493695U,
-    (uint64_t)1267090030199302U, (uint64_t)998908061660139U, (uint64_t)1784537499699278U,
-    (uint64_t)859195370018706U, (uint64_t)1953966091439379U, (uint64_t)2189271820076010U,
-    (uint64_t)2039067059943978U, (uint64_t)1526694380855202U, (uint64_t)2040321513194941U,
-    (uint64_t)329922071218689U, (uint64_t)1953032256401326U, (uint64_t)989631424403521U,
-    (uint64_t)328825014934242U, (uint64_t)9407151397696U, (uint64_t)63551373671268U,
-    (uint64_t)1624728632895792U, (uint64_t)1608324920739262U, (uint64_t)1178239350351945U,
-    (uint64_t)1198077399579702U, (uint64_t)277620088676229U, (uint64_t)1775359437312528U,
-    (uint64_t)1653558177737477U, (uint64_t)1652066043408850U, (uint64_t)1063359889686622U,
-    (uint64_t)1975063804860653U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1738742601995546ULL, 1146398526822698ULL, 2070867633025821ULL,
+    562264141797630ULL, 587772402128613ULL, 1801439850948184ULL, 1351079888211148ULL,
+    450359962737049ULL, 900719925474099ULL, 1801439850948198ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    1841354044333475ULL, 16398895984059ULL, 755974180946558ULL, 900171276175154ULL,
+    1821297809914039ULL, 1661154287933054ULL, 284530020860578ULL, 1390261174866914ULL,
+    1524110943907984ULL, 1045603498418422ULL, 928651508580478ULL, 1383326941296346ULL,
+    961937908925785ULL, 80455759693706ULL, 904734540352947ULL, 1507481815385608ULL,
+    2223447444246085ULL, 1083941587175919ULL, 2059929906842505ULL, 1581435440146976ULL,
+    782730187692425ULL, 9928394897574ULL, 1539449519985236ULL, 1923587931078510ULL,
+    552919286076056ULL, 376925408065760ULL, 447320488831784ULL, 1362918338468019ULL,
+    1470031896696846ULL, 2189796996539902ULL, 1337552949959847ULL, 1762287177775726ULL,
+    237994495816815ULL, 1277840395970544ULL, 543972849007241ULL, 1224692671618814ULL,
+    162359533289271ULL, 282240927125249ULL, 586909166382289ULL, 17726488197838ULL,
+    377014554985659ULL, 1433835303052512ULL, 702061469493692ULL, 1142253108318154ULL,
+    318297794307551ULL, 954362646308543ULL, 517363881452320ULL, 1868013482130416ULL,
+    262562472373260ULL, 902232853249919ULL, 2107343057055746ULL, 462368348619024ULL,
+    1893758677092974ULL, 2177729767846389ULL, 2168532543559143ULL, 443867094639821ULL,
+    730169342581022ULL, 1564589016879755ULL, 51218195700649ULL, 76684578423745ULL,
+    560266272480743ULL, 922517457707697ULL, 2066645939860874ULL, 1318277348414638ULL,
+    1576726809084003ULL, 1817337608563665ULL, 1874240939237666ULL, 754733726333910ULL,
+    97085310406474ULL, 751148364309235ULL, 1622159695715187ULL, 1444098819684916ULL,
+    130920805558089ULL, 1260449179085308ULL, 1860021740768461ULL, 110052860348509ULL,
+    193830891643810ULL, 164148413933881ULL, 180017794795332ULL, 1523506525254651ULL,
+    465981629225956ULL, 559733514964572ULL, 1279624874416974ULL, 2026642326892306ULL,
+    1425156829982409ULL, 2160936383793147ULL, 1061870624975247ULL, 2023497043036941ULL,
+    117942212883190ULL, 490339622800774ULL, 1729931303146295ULL, 422305932971074ULL,
+    529103152793096ULL, 1211973233775992ULL, 721364955929681ULL, 1497674430438813ULL,
+    342545521275073ULL, 2102107575279372ULL, 2108462244669966ULL, 1382582406064082ULL,
+    2206396818383323ULL, 2109093268641147ULL, 10809845110983ULL, 1605176920880099ULL,
+    744640650753946ULL, 1712758897518129ULL, 373410811281809ULL, 648838265800209ULL,
+    813058095530999ULL, 513987632620169ULL, 465516160703329ULL, 2136322186126330ULL,
+    1979645899422932ULL, 1197131006470786ULL, 1467836664863979ULL, 1340751381374628ULL,
+    1810066212667962ULL, 1009933588225499ULL, 1106129188080873ULL, 1388980405213901ULL,
+    533719246598044ULL, 1169435803073277ULL, 198920999285821ULL, 487492330629854ULL,
+    1807093008537778ULL, 1540899012923865ULL, 2075080271659867ULL, 1527990806921523ULL,
+    1323728742908002ULL, 1568595959608205ULL, 1388032187497212ULL, 2026968840050568ULL,
+    1396591153295755ULL, 820416950170901ULL, 520060313205582ULL, 2016404325094901ULL,
+    1584709677868520ULL, 272161374469956ULL, 1567188603996816ULL, 1986160530078221ULL,
+    553930264324589ULL, 1058426729027503ULL, 8762762886675ULL, 2216098143382988ULL,
+    1835145266889223ULL, 1712936431558441ULL, 1017009937844974ULL, 585361667812740ULL,
+    2114711541628181ULL, 2238729632971439ULL, 121257546253072ULL, 847154149018345ULL,
+    211972965476684ULL, 287499084460129ULL, 2098247259180197ULL, 839070411583329ULL,
+    339551619574372ULL, 1432951287640743ULL, 526481249498942ULL, 931991661905195ULL,
+    1884279965674487ULL, 200486405604411ULL, 364173020594788ULL, 518034455936955ULL,
+    1085564703965501ULL, 16030410467927ULL, 604865933167613ULL, 1695298441093964ULL,
+    498856548116159ULL, 2193030062787034ULL, 1706339802964179ULL, 1721199073493888ULL,
+    820740951039755ULL, 1216053436896834ULL, 23954895815139ULL, 1662515208920491ULL,
+    1705443427511899ULL, 1957928899570365ULL, 1189636258255725ULL, 1795695471103809ULL,
+    1691191297654118ULL, 282402585374360ULL, 460405330264832ULL, 63765529445733ULL,
+    469763447404473ULL, 733607089694996ULL, 685410420186959ULL, 1096682630419738ULL,
+    1162548510542362ULL, 1020949526456676ULL, 1211660396870573ULL, 613126398222696ULL,
+    1117829165843251ULL, 742432540886650ULL, 1483755088010658ULL, 942392007134474ULL,
+    1447834130944107ULL, 489368274863410ULL, 23192985544898ULL, 648442406146160ULL,
+    785438843373876ULL, 249464684645238ULL, 170494608205618ULL, 335112827260550ULL,
+    1462050123162735ULL, 1084803668439016ULL, 853459233600325ULL, 215777728187495ULL,
+    1965759433526974ULL, 1349482894446537ULL, 694163317612871ULL, 860536766165036ULL,
+    1178788094084321ULL, 1652739626626996ULL, 2115723946388185ULL, 1577204379094664ULL,
+    1083882859023240ULL, 1768759143381635ULL, 1737180992507258ULL, 246054513922239ULL,
+    577253134087234ULL, 356340280578042ULL, 1638917769925142ULL, 223550348130103ULL,
+    470592666638765ULL, 22663573966996ULL, 596552461152400ULL, 364143537069499ULL, 3942119457699ULL,
+    107951982889287ULL, 1843471406713209ULL, 1625773041610986ULL, 1466141092501702ULL,
+    1043024095021271ULL, 310429964047508ULL, 98559121500372ULL, 152746933782868ULL,
+    259407205078261ULL, 828123093322585ULL, 1576847274280091ULL, 1170871375757302ULL,
+    1588856194642775ULL, 984767822341977ULL, 1141497997993760ULL, 809325345150796ULL,
+    1879837728202511ULL, 201340910657893ULL, 1079157558888483ULL, 1052373448588065ULL,
+    1732036202501778ULL, 2105292670328445ULL, 679751387312402ULL, 1679682144926229ULL,
+    1695823455818780ULL, 498852317075849ULL, 1786555067788433ULL, 1670727545779425ULL,
+    117945875433544ULL, 407939139781844ULL, 854632120023778ULL, 1413383148360437ULL,
+    286030901733673ULL, 1207361858071196ULL, 461340408181417ULL, 1096919590360164ULL,
+    1837594897475685ULL, 533755561544165ULL, 1638688042247712ULL, 1431653684793005ULL,
+    1036458538873559ULL, 390822120341779ULL, 1920929837111618ULL, 543426740024168ULL,
+    645751357799929ULL, 2245025632994463ULL, 1550778638076452ULL, 223738153459949ULL,
+    1337209385492033ULL, 1276967236456531ULL, 1463815821063071ULL, 2070620870191473ULL,
+    1199170709413753ULL, 273230877394166ULL, 1873264887608046ULL, 890877152910775ULL,
+    983226445635730ULL, 44873798519521ULL, 697147127512130ULL, 961631038239304ULL,
+    709966160696826ULL, 1706677689540366ULL, 502782733796035ULL, 812545535346033ULL,
+    1693622521296452ULL, 1955813093002510ULL, 1259937612881362ULL, 1873032503803559ULL,
+    1140330566016428ULL, 1675726082440190ULL, 60029928909786ULL, 170335608866763ULL,
+    766444312315022ULL, 2025049511434113ULL, 2200845622430647ULL, 1201269851450408ULL,
+    590071752404907ULL, 1400995030286946ULL, 2152637413853822ULL, 2108495473841983ULL,
+    3855406710349ULL, 1726137673168580ULL, 51004317200100ULL, 1749082328586939ULL,
+    1704088976144558ULL, 1977318954775118ULL, 2062602253162400ULL, 948062503217479ULL,
+    361953965048030ULL, 1528264887238440ULL, 62582552172290ULL, 2241602163389280ULL,
+    156385388121765ULL, 2124100319761492ULL, 388928050571382ULL, 1556123596922727ULL,
+    979310669812384ULL, 113043855206104ULL, 2023223924825469ULL, 643651703263034ULL,
+    2234446903655540ULL, 1577241261424997ULL, 860253174523845ULL, 1691026473082448ULL,
+    1091672764933872ULL, 1957463109756365ULL, 530699502660193ULL, 349587141723569ULL,
+    674661681919563ULL, 1633727303856240ULL, 708909037922144ULL, 2160722508518119ULL,
+    1302188051602540ULL, 976114603845777ULL, 120004758721939ULL, 1681630708873780ULL,
+    622274095069244ULL, 1822346309016698ULL, 1100921177951904ULL, 2216952659181677ULL,
+    1844020550362490ULL, 1976451368365774ULL, 1321101422068822ULL, 1189859436282668ULL,
+    2008801879735257ULL, 2219413454333565ULL, 424288774231098ULL, 359793146977912ULL,
+    270293357948703ULL, 587226003677000ULL, 1482071926139945ULL, 1419630774650359ULL,
+    1104739070570175ULL, 1662129023224130ULL, 1609203612533411ULL, 1250932720691980ULL,
+    95215711818495ULL, 498746909028150ULL, 158151296991874ULL, 1201379988527734ULL,
+    561599945143989ULL, 2211577425617888ULL, 2166577612206324ULL, 1057590354233512ULL,
+    1968123280416769ULL, 1316586165401313ULL, 762728164447634ULL, 2045395244316047ULL,
+    1531796898725716ULL, 315385971670425ULL, 1109421039396756ULL, 2183635256408562ULL,
+    1896751252659461ULL, 840236037179080ULL, 796245792277211ULL, 508345890111193ULL,
+    1275386465287222ULL, 513560822858784ULL, 1784735733120313ULL, 1346467478899695ULL,
+    601125231208417ULL, 701076661112726ULL, 1841998436455089ULL, 1156768600940434ULL,
+    1967853462343221ULL, 2178318463061452ULL, 481885520752741ULL, 675262828640945ULL,
+    1033539418596582ULL, 1743329872635846ULL, 159322641251283ULL, 1573076470127113ULL,
+    954827619308195ULL, 778834750662635ULL, 619912782122617ULL, 515681498488209ULL,
+    1675866144246843ULL, 811716020969981ULL, 1125515272217398ULL, 1398917918287342ULL,
+    1301680949183175ULL, 726474739583734ULL, 587246193475200ULL, 1096581582611864ULL,
+    1469911826213486ULL, 1990099711206364ULL, 1256496099816508ULL, 2019924615195672ULL,
+    1251232456707555ULL, 2042971196009755ULL, 214061878479265ULL, 115385726395472ULL,
+    1677875239524132ULL, 756888883383540ULL, 1153862117756233ULL, 503391530851096ULL,
+    946070017477513ULL, 1878319040542579ULL, 1101349418586920ULL, 793245696431613ULL,
+    397920495357645ULL, 2174023872951112ULL, 1517867915189593ULL, 1829855041462995ULL,
+    1046709983503619ULL, 424081940711857ULL, 2112438073094647ULL, 1504338467349861ULL,
+    2244574127374532ULL, 2136937537441911ULL, 1741150838990304ULL, 25894628400571ULL,
+    512213526781178ULL, 1168384260796379ULL, 1424607682379833ULL, 938677789731564ULL,
+    872882241891896ULL, 1713199397007700ULL, 1410496326218359ULL, 854379752407031ULL,
+    465141611727634ULL, 315176937037857ULL, 1020115054571233ULL, 1856290111077229ULL,
+    2028366269898204ULL, 1432980880307543ULL, 469932710425448ULL, 581165267592247ULL,
+    496399148156603ULL, 2063435226705903ULL, 2116841086237705ULL, 498272567217048ULL,
+    1829438076967906ULL, 1573925801278491ULL, 460763576329867ULL, 1705264723728225ULL,
+    999514866082412ULL, 29635061779362ULL, 1884233592281020ULL, 1449755591461338ULL,
+    42579292783222ULL, 1869504355369200ULL, 495506004805251ULL, 264073104888427ULL,
+    2088880861028612ULL, 104646456386576ULL, 1258445191399967ULL, 1348736801545799ULL,
+    2068276361286613ULL, 884897216646374ULL, 922387476801376ULL, 1043886580402805ULL,
+    1240883498470831ULL, 1601554651937110ULL, 804382935289482ULL, 512379564477239ULL,
+    1466384519077032ULL, 1280698500238386ULL, 211303836685749ULL, 2081725624793803ULL,
+    545247644516879ULL, 215313359330384ULL, 286479751145614ULL, 2213650281751636ULL,
+    2164927945999874ULL, 2072162991540882ULL, 1443769115444779ULL, 1581473274363095ULL,
+    434633875922699ULL, 340456055781599ULL, 373043091080189ULL, 839476566531776ULL,
+    1856706858509978ULL, 931616224909153ULL, 1888181317414065ULL, 213654322650262ULL,
+    1161078103416244ULL, 1822042328851513ULL, 915817709028812ULL, 1828297056698188ULL,
+    1212017130909403ULL, 60258343247333ULL, 342085800008230ULL, 930240559508270ULL,
+    1549884999174952ULL, 809895264249462ULL, 184726257947682ULL, 1157065433504828ULL,
+    1209999630381477ULL, 999920399374391ULL, 1714770150788163ULL, 2026130985413228ULL,
+    506776632883140ULL, 1349042668246528ULL, 1937232292976967ULL, 942302637530730ULL,
+    160211904766226ULL, 1042724500438571ULL, 212454865139142ULL, 244104425172642ULL,
+    1376990622387496ULL, 76126752421227ULL, 1027540886376422ULL, 1912210655133026ULL,
+    13410411589575ULL, 1475856708587773ULL, 615563352691682ULL, 1446629324872644ULL,
+    1683670301784014ULL, 1049873327197127ULL, 1826401704084838ULL, 2032577048760775ULL,
+    1922203607878853ULL, 836708788764806ULL, 2193084654695012ULL, 1342923183256659ULL,
+    849356986294271ULL, 1228863973965618ULL, 94886161081867ULL, 1423288430204892ULL,
+    2016167528707016ULL, 1633187660972877ULL, 1550621242301752ULL, 340630244512994ULL,
+    2103577710806901ULL, 221625016538931ULL, 421544147350960ULL, 580428704555156ULL,
+    1479831381265617ULL, 518057926544698ULL, 955027348790630ULL, 1326749172561598ULL,
+    1118304625755967ULL, 1994005916095176ULL, 1799757332780663ULL, 751343129396941ULL,
+    1468672898746144ULL, 1451689964451386ULL, 755070293921171ULL, 904857405877052ULL,
+    1276087530766984ULL, 403986562858511ULL, 1530661255035337ULL, 1644972908910502ULL,
+    1370170080438957ULL, 139839536695744ULL, 909930462436512ULL, 1899999215356933ULL,
+    635992381064566ULL, 788740975837654ULL, 224241231493695ULL, 1267090030199302ULL,
+    998908061660139ULL, 1784537499699278ULL, 859195370018706ULL, 1953966091439379ULL,
+    2189271820076010ULL, 2039067059943978ULL, 1526694380855202ULL, 2040321513194941ULL,
+    329922071218689ULL, 1953032256401326ULL, 989631424403521ULL, 328825014934242ULL,
+    9407151397696ULL, 63551373671268ULL, 1624728632895792ULL, 1608324920739262ULL,
+    1178239350351945ULL, 1198077399579702ULL, 277620088676229ULL, 1775359437312528ULL,
+    1653558177737477ULL, 1652066043408850ULL, 1063359889686622ULL, 1975063804860653ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/msvc/internal/Hacl_Frodo_KEM.h b/include/msvc/internal/Hacl_Frodo_KEM.h
index 61574981..6a1ece49 100644
--- a/include/msvc/internal/Hacl_Frodo_KEM.h
+++ b/include/msvc/internal/Hacl_Frodo_KEM.h
@@ -55,22 +55,22 @@ Hacl_Keccak_shake128_4x(
   uint8_t *output3
 )
 {
-  Hacl_SHA3_shake128_hacl(input_len, input0, output_len, output0);
-  Hacl_SHA3_shake128_hacl(input_len, input1, output_len, output1);
-  Hacl_SHA3_shake128_hacl(input_len, input2, output_len, output2);
-  Hacl_SHA3_shake128_hacl(input_len, input3, output_len, output3);
+  Hacl_Hash_SHA3_shake128_hacl(input_len, input0, output_len, output0);
+  Hacl_Hash_SHA3_shake128_hacl(input_len, input1, output_len, output1);
+  Hacl_Hash_SHA3_shake128_hacl(input_len, input2, output_len, output2);
+  Hacl_Hash_SHA3_shake128_hacl(input_len, input3, output_len, output3);
 }
 
 static inline void
 Hacl_Impl_Matrix_mod_pow2(uint32_t n1, uint32_t n2, uint32_t logq, uint16_t *a)
 {
-  if (logq < (uint32_t)16U)
+  if (logq < 16U)
   {
-    for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+    for (uint32_t i0 = 0U; i0 < n1; i0++)
     {
-      for (uint32_t i = (uint32_t)0U; i < n2; i++)
+      for (uint32_t i = 0U; i < n2; i++)
       {
-        a[i0 * n2 + i] = a[i0 * n2 + i] & (((uint16_t)1U << logq) - (uint16_t)1U);
+        a[i0 * n2 + i] = (uint32_t)a[i0 * n2 + i] & ((1U << logq) - 1U);
       }
     }
     return;
@@ -80,11 +80,11 @@ Hacl_Impl_Matrix_mod_pow2(uint32_t n1, uint32_t n2, uint32_t logq, uint16_t *a)
 static inline void
 Hacl_Impl_Matrix_matrix_add(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i = (uint32_t)0U; i < n2; i++)
+    for (uint32_t i = 0U; i < n2; i++)
     {
-      a[i0 * n2 + i] = a[i0 * n2 + i] + b[i0 * n2 + i];
+      a[i0 * n2 + i] = (uint32_t)a[i0 * n2 + i] + (uint32_t)b[i0 * n2 + i];
     }
   }
 }
@@ -92,11 +92,11 @@ Hacl_Impl_Matrix_matrix_add(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 static inline void
 Hacl_Impl_Matrix_matrix_sub(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i = (uint32_t)0U; i < n2; i++)
+    for (uint32_t i = 0U; i < n2; i++)
     {
-      b[i0 * n2 + i] = a[i0 * n2 + i] - b[i0 * n2 + i];
+      b[i0 * n2 + i] = (uint32_t)a[i0 * n2 + i] - (uint32_t)b[i0 * n2 + i];
     }
   }
 }
@@ -111,17 +111,17 @@ Hacl_Impl_Matrix_matrix_mul(
   uint16_t *c
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n3; i1++)
+    for (uint32_t i1 = 0U; i1 < n3; i1++)
     {
-      uint16_t res = (uint16_t)0U;
-      for (uint32_t i = (uint32_t)0U; i < n2; i++)
+      uint16_t res = 0U;
+      for (uint32_t i = 0U; i < n2; i++)
       {
         uint16_t aij = a[i0 * n2 + i];
         uint16_t bjk = b[i * n3 + i1];
         uint16_t res0 = res;
-        res = res0 + aij * bjk;
+        res = (uint32_t)res0 + (uint32_t)aij * (uint32_t)bjk;
       }
       c[i0 * n3 + i1] = res;
     }
@@ -138,17 +138,17 @@ Hacl_Impl_Matrix_matrix_mul_s(
   uint16_t *c
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n3; i1++)
+    for (uint32_t i1 = 0U; i1 < n3; i1++)
     {
-      uint16_t res = (uint16_t)0U;
-      for (uint32_t i = (uint32_t)0U; i < n2; i++)
+      uint16_t res = 0U;
+      for (uint32_t i = 0U; i < n2; i++)
       {
         uint16_t aij = a[i0 * n2 + i];
         uint16_t bjk = b[i1 * n2 + i];
         uint16_t res0 = res;
-        res = res0 + aij * bjk;
+        res = (uint32_t)res0 + (uint32_t)aij * (uint32_t)bjk;
       }
       c[i0 * n3 + i1] = res;
     }
@@ -158,11 +158,11 @@ Hacl_Impl_Matrix_matrix_mul_s(
 static inline uint16_t
 Hacl_Impl_Matrix_matrix_eq(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 {
-  uint16_t res = (uint16_t)0xFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < n1 * n2; i++)
+  uint16_t res = 0xFFFFU;
+  for (uint32_t i = 0U; i < n1 * n2; i++)
   {
     uint16_t uu____0 = FStar_UInt16_eq_mask(a[i], b[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint16_t r = res;
   return r;
@@ -171,19 +171,19 @@ Hacl_Impl_Matrix_matrix_eq(uint32_t n1, uint32_t n2, uint16_t *a, uint16_t *b)
 static inline void
 Hacl_Impl_Matrix_matrix_to_lbytes(uint32_t n1, uint32_t n2, uint16_t *m, uint8_t *res)
 {
-  for (uint32_t i = (uint32_t)0U; i < n1 * n2; i++)
+  for (uint32_t i = 0U; i < n1 * n2; i++)
   {
-    store16_le(res + (uint32_t)2U * i, m[i]);
+    store16_le(res + 2U * i, m[i]);
   }
 }
 
 static inline void
 Hacl_Impl_Matrix_matrix_from_lbytes(uint32_t n1, uint32_t n2, uint8_t *b, uint16_t *res)
 {
-  for (uint32_t i = (uint32_t)0U; i < n1 * n2; i++)
+  for (uint32_t i = 0U; i < n1 * n2; i++)
   {
     uint16_t *os = res;
-    uint16_t u = load16_le(b + (uint32_t)2U * i);
+    uint16_t u = load16_le(b + 2U * i);
     uint16_t x = u;
     os[i] = x;
   }
@@ -192,53 +192,53 @@ Hacl_Impl_Matrix_matrix_from_lbytes(uint32_t n1, uint32_t n2, uint8_t *b, uint16
 static inline void
 Hacl_Impl_Frodo_Gen_frodo_gen_matrix_shake_4x(uint32_t n, uint8_t *seed, uint16_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint8_t), (uint32_t)8U * n);
-  uint8_t *r = (uint8_t *)alloca((uint32_t)8U * n * sizeof (uint8_t));
-  memset(r, 0U, (uint32_t)8U * n * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), 8U * n);
+  uint8_t *r = (uint8_t *)alloca(8U * n * sizeof (uint8_t));
+  memset(r, 0U, 8U * n * sizeof (uint8_t));
   uint8_t tmp_seed[72U] = { 0U };
-  memcpy(tmp_seed + (uint32_t)2U, seed, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(tmp_seed + (uint32_t)20U, seed, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(tmp_seed + (uint32_t)38U, seed, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(tmp_seed + (uint32_t)56U, seed, (uint32_t)16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 2U, seed, 16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 20U, seed, 16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 38U, seed, 16U * sizeof (uint8_t));
+  memcpy(tmp_seed + 56U, seed, 16U * sizeof (uint8_t));
   memset(res, 0U, n * n * sizeof (uint16_t));
-  for (uint32_t i = (uint32_t)0U; i < n / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < n / 4U; i++)
   {
-    uint8_t *r0 = r + (uint32_t)0U * n;
-    uint8_t *r1 = r + (uint32_t)2U * n;
-    uint8_t *r2 = r + (uint32_t)4U * n;
-    uint8_t *r3 = r + (uint32_t)6U * n;
+    uint8_t *r0 = r + 0U * n;
+    uint8_t *r1 = r + 2U * n;
+    uint8_t *r2 = r + 4U * n;
+    uint8_t *r3 = r + 6U * n;
     uint8_t *tmp_seed0 = tmp_seed;
-    uint8_t *tmp_seed1 = tmp_seed + (uint32_t)18U;
-    uint8_t *tmp_seed2 = tmp_seed + (uint32_t)36U;
-    uint8_t *tmp_seed3 = tmp_seed + (uint32_t)54U;
-    store16_le(tmp_seed0, (uint16_t)((uint32_t)4U * i + (uint32_t)0U));
-    store16_le(tmp_seed1, (uint16_t)((uint32_t)4U * i + (uint32_t)1U));
-    store16_le(tmp_seed2, (uint16_t)((uint32_t)4U * i + (uint32_t)2U));
-    store16_le(tmp_seed3, (uint16_t)((uint32_t)4U * i + (uint32_t)3U));
-    Hacl_Keccak_shake128_4x((uint32_t)18U,
+    uint8_t *tmp_seed1 = tmp_seed + 18U;
+    uint8_t *tmp_seed2 = tmp_seed + 36U;
+    uint8_t *tmp_seed3 = tmp_seed + 54U;
+    store16_le(tmp_seed0, (uint16_t)(4U * i + 0U));
+    store16_le(tmp_seed1, (uint16_t)(4U * i + 1U));
+    store16_le(tmp_seed2, (uint16_t)(4U * i + 2U));
+    store16_le(tmp_seed3, (uint16_t)(4U * i + 3U));
+    Hacl_Keccak_shake128_4x(18U,
       tmp_seed0,
       tmp_seed1,
       tmp_seed2,
       tmp_seed3,
-      (uint32_t)2U * n,
+      2U * n,
       r0,
       r1,
       r2,
       r3);
-    for (uint32_t i0 = (uint32_t)0U; i0 < n; i0++)
+    for (uint32_t i0 = 0U; i0 < n; i0++)
     {
-      uint8_t *resij0 = r0 + i0 * (uint32_t)2U;
-      uint8_t *resij1 = r1 + i0 * (uint32_t)2U;
-      uint8_t *resij2 = r2 + i0 * (uint32_t)2U;
-      uint8_t *resij3 = r3 + i0 * (uint32_t)2U;
+      uint8_t *resij0 = r0 + i0 * 2U;
+      uint8_t *resij1 = r1 + i0 * 2U;
+      uint8_t *resij2 = r2 + i0 * 2U;
+      uint8_t *resij3 = r3 + i0 * 2U;
       uint16_t u = load16_le(resij0);
-      res[((uint32_t)4U * i + (uint32_t)0U) * n + i0] = u;
+      res[(4U * i + 0U) * n + i0] = u;
       uint16_t u0 = load16_le(resij1);
-      res[((uint32_t)4U * i + (uint32_t)1U) * n + i0] = u0;
+      res[(4U * i + 1U) * n + i0] = u0;
       uint16_t u1 = load16_le(resij2);
-      res[((uint32_t)4U * i + (uint32_t)2U) * n + i0] = u1;
+      res[(4U * i + 2U) * n + i0] = u1;
       uint16_t u2 = load16_le(resij3);
-      res[((uint32_t)4U * i + (uint32_t)3U) * n + i0] = u2;
+      res[(4U * i + 3U) * n + i0] = u2;
     }
   }
 }
@@ -270,27 +270,19 @@ static const
 uint16_t
 Hacl_Impl_Frodo_Params_cdf_table640[13U] =
   {
-    (uint16_t)4643U, (uint16_t)13363U, (uint16_t)20579U, (uint16_t)25843U, (uint16_t)29227U,
-    (uint16_t)31145U, (uint16_t)32103U, (uint16_t)32525U, (uint16_t)32689U, (uint16_t)32745U,
-    (uint16_t)32762U, (uint16_t)32766U, (uint16_t)32767U
+    4643U, 13363U, 20579U, 25843U, 29227U, 31145U, 32103U, 32525U, 32689U, 32745U, 32762U, 32766U,
+    32767U
   };
 
 static const
 uint16_t
 Hacl_Impl_Frodo_Params_cdf_table976[11U] =
-  {
-    (uint16_t)5638U, (uint16_t)15915U, (uint16_t)23689U, (uint16_t)28571U, (uint16_t)31116U,
-    (uint16_t)32217U, (uint16_t)32613U, (uint16_t)32731U, (uint16_t)32760U, (uint16_t)32766U,
-    (uint16_t)32767U
-  };
+  { 5638U, 15915U, 23689U, 28571U, 31116U, 32217U, 32613U, 32731U, 32760U, 32766U, 32767U };
 
 static const
 uint16_t
 Hacl_Impl_Frodo_Params_cdf_table1344[7U] =
-  {
-    (uint16_t)9142U, (uint16_t)23462U, (uint16_t)30338U, (uint16_t)32361U, (uint16_t)32725U,
-    (uint16_t)32765U, (uint16_t)32767U
-  };
+  { 9142U, 23462U, 30338U, 32361U, 32725U, 32765U, 32767U };
 
 static inline void
 Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(
@@ -301,26 +293,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)12U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 12U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table640[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -334,26 +326,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)12U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 12U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table640[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -367,26 +359,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)10U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 10U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table976[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
@@ -400,26 +392,26 @@ Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(
 )
 {
   memset(res, 0U, n1 * n2 * sizeof (uint16_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < n1; i0++)
+  for (uint32_t i0 = 0U; i0 < n1; i0++)
   {
-    for (uint32_t i1 = (uint32_t)0U; i1 < n2; i1++)
+    for (uint32_t i1 = 0U; i1 < n2; i1++)
     {
-      uint8_t *resij = r + (uint32_t)2U * (n2 * i0 + i1);
+      uint8_t *resij = r + 2U * (n2 * i0 + i1);
       uint16_t u = load16_le(resij);
       uint16_t uu____0 = u;
-      uint16_t prnd = uu____0 >> (uint32_t)1U;
-      uint16_t sign = uu____0 & (uint16_t)1U;
-      uint16_t sample = (uint16_t)0U;
-      uint32_t bound = (uint32_t)6U;
-      for (uint32_t i = (uint32_t)0U; i < bound; i++)
+      uint16_t prnd = (uint32_t)uu____0 >> 1U;
+      uint16_t sign = (uint32_t)uu____0 & 1U;
+      uint16_t sample = 0U;
+      uint32_t bound = 6U;
+      for (uint32_t i = 0U; i < bound; i++)
       {
         uint16_t sample0 = sample;
         uint16_t ti = Hacl_Impl_Frodo_Params_cdf_table1344[i];
-        uint16_t samplei = (uint16_t)(uint32_t)(ti - prnd) >> (uint32_t)15U;
-        sample = samplei + sample0;
+        uint16_t samplei = (uint32_t)(uint16_t)(uint32_t)((uint32_t)ti - (uint32_t)prnd) >> 15U;
+        sample = (uint32_t)samplei + (uint32_t)sample0;
       }
       uint16_t sample0 = sample;
-      res[i0 * n2 + i1] = ((~sign + (uint16_t)1U) ^ sample0) + sign;
+      res[i0 * n2 + i1] = (((uint32_t)~sign + 1U) ^ (uint32_t)sample0) + (uint32_t)sign;
     }
   }
 }
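
The three sampling hunks above all share one constant-time inverse-CDF loop: fifteen random bits are compared against every entry of the per-parameter CDF table, the comparison is read off the sign bit of a 16-bit subtraction, and the remaining random bit conditionally negates the tally with a two's-complement trick. A minimal standalone sketch of that loop follows; the helper name and the table-as-parameter signature are illustrative only and replace the Hacl_Impl_Frodo_Params_cdf_table* globals used in the hunks.

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative sketch, not code from the patch: draw one sample the way the
     * loops above do.  rnd is one 16-bit word of PRNG output; cdf_table/cdf_len
     * stand in for the cdf_table640/976/1344 arrays and their bounds
     * (12, 10 and 6 in the hunks above). */
    static uint16_t
    frodo_sample_sketch(uint16_t rnd, const uint16_t *cdf_table, size_t cdf_len)
    {
      uint16_t prnd = (uint16_t)(rnd >> 1); /* 15 bits compared against the CDF */
      uint16_t sign = (uint16_t)(rnd & 1U); /* 1 bit selects the sample's sign  */
      uint16_t sample = 0U;
      for (size_t i = 0U; i < cdf_len; i++)
      {
        /* prnd and the CDF entries fit in 15 bits, so (cdf_table[i] - prnd)
         * taken mod 2^16 has its top bit set exactly when prnd exceeds the
         * entry; the loop thus counts, branch-free, how many CDF entries lie
         * strictly below prnd. */
        uint16_t lt = (uint16_t)((uint16_t)(cdf_table[i] - prnd) >> 15);
        sample = (uint16_t)(sample + lt);
      }
      /* Branch-free conditional negation: ((~sign + 1) ^ sample) + sign yields
       * sample when sign == 0 and 2^16 - sample when sign == 1. */
      return (uint16_t)((((uint16_t)(~sign + 1U)) ^ sample) + sign);
    }
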
@@ -435,39 +427,34 @@ Hacl_Impl_Frodo_Pack_frodo_pack(
   uint8_t *res
 )
 {
-  uint32_t n = n1 * n2 / (uint32_t)8U;
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  uint32_t n = n1 * n2 / 8U;
+  for (uint32_t i = 0U; i < n; i++)
   {
-    uint16_t *a1 = a + (uint32_t)8U * i;
+    uint16_t *a1 = a + 8U * i;
     uint8_t *r = res + d * i;
-    uint16_t maskd = (uint16_t)((uint32_t)1U << d) - (uint16_t)1U;
+    uint16_t maskd = (uint32_t)(uint16_t)(1U << d) - 1U;
     uint8_t v16[16U] = { 0U };
-    uint16_t a0 = a1[0U] & maskd;
-    uint16_t a11 = a1[1U] & maskd;
-    uint16_t a2 = a1[2U] & maskd;
-    uint16_t a3 = a1[3U] & maskd;
-    uint16_t a4 = a1[4U] & maskd;
-    uint16_t a5 = a1[5U] & maskd;
-    uint16_t a6 = a1[6U] & maskd;
-    uint16_t a7 = a1[7U] & maskd;
+    uint16_t a0 = (uint32_t)a1[0U] & (uint32_t)maskd;
+    uint16_t a11 = (uint32_t)a1[1U] & (uint32_t)maskd;
+    uint16_t a2 = (uint32_t)a1[2U] & (uint32_t)maskd;
+    uint16_t a3 = (uint32_t)a1[3U] & (uint32_t)maskd;
+    uint16_t a4 = (uint32_t)a1[4U] & (uint32_t)maskd;
+    uint16_t a5 = (uint32_t)a1[5U] & (uint32_t)maskd;
+    uint16_t a6 = (uint32_t)a1[6U] & (uint32_t)maskd;
+    uint16_t a7 = (uint32_t)a1[7U] & (uint32_t)maskd;
     FStar_UInt128_uint128
     templong =
       FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_logor(FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a0),
-                      (uint32_t)7U * d),
-                    FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a11),
-                      (uint32_t)6U * d)),
-                  FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a2),
-                    (uint32_t)5U * d)),
-                FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a3),
-                  (uint32_t)4U * d)),
-              FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a4),
-                (uint32_t)3U * d)),
-            FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a5),
-              (uint32_t)2U * d)),
-          FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a6), (uint32_t)1U * d)),
-        FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a7), (uint32_t)0U * d));
+                      7U * d),
+                    FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a11), 6U * d)),
+                  FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a2), 5U * d)),
+                FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a3), 4U * d)),
+              FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a4), 3U * d)),
+            FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a5), 2U * d)),
+          FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a6), 1U * d)),
+        FStar_UInt128_shift_left(FStar_UInt128_uint64_to_uint128((uint64_t)a7), 0U * d));
     store128_be(v16, templong);
-    uint8_t *src = v16 + (uint32_t)16U - d;
+    uint8_t *src = v16 + 16U - d;
     memcpy(r, src, d * sizeof (uint8_t));
   }
 }
@@ -481,48 +468,48 @@ Hacl_Impl_Frodo_Pack_frodo_unpack(
   uint16_t *res
 )
 {
-  uint32_t n = n1 * n2 / (uint32_t)8U;
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  uint32_t n = n1 * n2 / 8U;
+  for (uint32_t i = 0U; i < n; i++)
   {
     uint8_t *b1 = b + d * i;
-    uint16_t *r = res + (uint32_t)8U * i;
-    uint16_t maskd = (uint16_t)((uint32_t)1U << d) - (uint16_t)1U;
+    uint16_t *r = res + 8U * i;
+    uint16_t maskd = (uint32_t)(uint16_t)(1U << d) - 1U;
     uint8_t src[16U] = { 0U };
-    memcpy(src + (uint32_t)16U - d, b1, d * sizeof (uint8_t));
+    memcpy(src + 16U - d, b1, d * sizeof (uint8_t));
     FStar_UInt128_uint128 u = load128_be(src);
     FStar_UInt128_uint128 templong = u;
     r[0U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)7U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          7U * d))
+      & (uint32_t)maskd;
     r[1U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)6U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          6U * d))
+      & (uint32_t)maskd;
     r[2U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)5U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          5U * d))
+      & (uint32_t)maskd;
     r[3U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)4U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          4U * d))
+      & (uint32_t)maskd;
     r[4U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)3U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          3U * d))
+      & (uint32_t)maskd;
     r[5U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)2U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          2U * d))
+      & (uint32_t)maskd;
     r[6U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)1U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          1U * d))
+      & (uint32_t)maskd;
     r[7U] =
-      (uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
-          (uint32_t)0U * d))
-      & maskd;
+      (uint32_t)(uint16_t)FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(templong,
+          0U * d))
+      & (uint32_t)maskd;
   }
 }
 
@@ -535,7 +522,7 @@ Hacl_Impl_Frodo_Encode_frodo_key_encode(
   uint16_t *res
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n; i0++)
+  for (uint32_t i0 = 0U; i0 < n; i0++)
   {
     uint8_t v8[8U] = { 0U };
     uint8_t *chunk = a + i0 * b;
@@ -544,11 +531,11 @@ Hacl_Impl_Frodo_Encode_frodo_key_encode(
     uint64_t x = u;
     uint64_t x0 = x;
     KRML_MAYBE_FOR8(i,
-      (uint32_t)0U,
-      (uint32_t)8U,
-      (uint32_t)1U,
-      uint64_t rk = x0 >> b * i & (((uint64_t)1U << b) - (uint64_t)1U);
-      res[i0 * n + i] = (uint16_t)rk << (logq - b););
+      0U,
+      8U,
+      1U,
+      uint64_t rk = x0 >> b * i & ((1ULL << b) - 1ULL);
+      res[i0 * n + i] = (uint32_t)(uint16_t)rk << (logq - b););
   }
 }
 
@@ -561,16 +548,16 @@ Hacl_Impl_Frodo_Encode_frodo_key_decode(
   uint8_t *res
 )
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < n; i0++)
+  for (uint32_t i0 = 0U; i0 < n; i0++)
   {
-    uint64_t templong = (uint64_t)0U;
+    uint64_t templong = 0ULL;
     KRML_MAYBE_FOR8(i,
-      (uint32_t)0U,
-      (uint32_t)8U,
-      (uint32_t)1U,
+      0U,
+      8U,
+      1U,
       uint16_t aik = a[i0 * n + i];
-      uint16_t res1 = (aik + ((uint16_t)1U << (logq - b - (uint32_t)1U))) >> (logq - b);
-      templong = templong | (uint64_t)(res1 & (((uint16_t)1U << b) - (uint16_t)1U)) << b * i;);
+      uint16_t res1 = (((uint32_t)aik + (1U << (logq - b - 1U))) & 0xFFFFU) >> (logq - b);
+      templong = templong | (uint64_t)((uint32_t)res1 & ((1U << b) - 1U)) << b * i;);
     uint64_t templong0 = templong;
     uint8_t v8[8U] = { 0U };
     store64_le(v8, templong0);
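
The encode/decode hunks above carry Frodo's message embedding: encoding lifts each b-bit chunk into the top bits of a logq-bit coefficient, and decoding adds half an encoding step before shifting back down, so a coefficient within half a step of its codeword decodes back to the original b bits. A minimal per-coefficient sketch of that round trip follows; the helper names and standalone signatures are illustrative, not identifiers from the patch.

    #include <stdint.h>

    /* Illustrative sketch, not code from the patch: one coefficient of the
     * key encode/decode round trip shown in the hunks above.  logq is the
     * coefficient width (15 or 16 in FrodoKEM) and b the message bits per
     * coefficient; both are assumed parameters here. */
    static uint16_t frodo_encode_coeff(uint16_t k, uint32_t logq, uint32_t b)
    {
      /* Place the b message bits in the top bits of the logq-bit coefficient. */
      return (uint16_t)((uint32_t)k << (logq - b));
    }

    static uint16_t frodo_decode_coeff(uint16_t c, uint32_t logq, uint32_t b)
    {
      /* Add half an encoding step before shifting down, so small additive
       * noise on the coefficient is rounded away; the final mask keeps the
       * recovered b bits, discarding any carry out of the top. */
      uint32_t rounded = ((uint32_t)c + (1U << (logq - b - 1U))) & 0xFFFFU;
      return (uint16_t)((rounded >> (logq - b)) & ((1U << b) - 1U));
    }
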
diff --git a/include/internal/Hacl_Hash_Blake2.h b/include/msvc/internal/Hacl_HMAC.h
similarity index 82%
rename from include/internal/Hacl_Hash_Blake2.h
rename to include/msvc/internal/Hacl_HMAC.h
index 8f308bd9..ad344c4c 100644
--- a/include/internal/Hacl_Hash_Blake2.h
+++ b/include/msvc/internal/Hacl_HMAC.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __internal_Hacl_Hash_Blake2_H
-#define __internal_Hacl_Hash_Blake2_H
+#ifndef __internal_Hacl_HMAC_H
+#define __internal_Hacl_HMAC_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,8 +35,12 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "internal/Hacl_Impl_Blake2_Constants.h"
-#include "../Hacl_Hash_Blake2.h"
+#include "internal/Hacl_Krmllib.h"
+#include "internal/Hacl_Hash_SHA2.h"
+#include "internal/Hacl_Hash_SHA1.h"
+#include "internal/Hacl_Hash_Blake2s.h"
+#include "internal/Hacl_Hash_Blake2b.h"
+#include "../Hacl_HMAC.h"
 
 typedef struct K___uint32_t_uint32_t_s
 {
@@ -49,5 +53,5 @@ K___uint32_t_uint32_t;
 }
 #endif
 
-#define __internal_Hacl_Hash_Blake2_H_DEFINED
+#define __internal_Hacl_HMAC_H_DEFINED
 #endif
diff --git a/include/msvc/internal/Hacl_Hash_Blake2b.h b/include/msvc/internal/Hacl_Hash_Blake2b.h
new file mode 100644
index 00000000..21689d60
--- /dev/null
+++ b/include/msvc/internal/Hacl_Hash_Blake2b.h
@@ -0,0 +1,70 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __internal_Hacl_Hash_Blake2b_H
+#define __internal_Hacl_Hash_Blake2b_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "../Hacl_Hash_Blake2b.h"
+
+void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn);
+
+void
+Hacl_Hash_Blake2b_update_multi(
+  uint32_t len,
+  uint64_t *wv,
+  uint64_t *hash,
+  FStar_UInt128_uint128 prev,
+  uint8_t *blocks,
+  uint32_t nb
+);
+
+void
+Hacl_Hash_Blake2b_update_last(
+  uint32_t len,
+  uint64_t *wv,
+  uint64_t *hash,
+  FStar_UInt128_uint128 prev,
+  uint32_t rem,
+  uint8_t *d
+);
+
+void Hacl_Hash_Blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __internal_Hacl_Hash_Blake2b_H_DEFINED
+#endif
diff --git a/include/msvc/Hacl_Hash_Blake2b_256.h b/include/msvc/internal/Hacl_Hash_Blake2b_Simd256.h
similarity index 61%
rename from include/msvc/Hacl_Hash_Blake2b_256.h
rename to include/msvc/internal/Hacl_Hash_Blake2b_Simd256.h
index 2379fd75..4cc07869 100644
--- a/include/msvc/Hacl_Hash_Blake2b_256.h
+++ b/include/msvc/internal/Hacl_Hash_Blake2b_Simd256.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Hash_Blake2b_256_H
-#define __Hacl_Hash_Blake2b_256_H
+#ifndef __internal_Hacl_Hash_Blake2b_Simd256_H
+#define __internal_Hacl_Hash_Blake2b_Simd256_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,23 +35,15 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "Hacl_Krmllib.h"
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "../Hacl_Hash_Blake2b_Simd256.h"
 #include "libintvector.h"
 
 void
-Hacl_Blake2b_256_blake2b_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, uint32_t nn);
+Hacl_Hash_Blake2b_Simd256_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, uint32_t nn);
 
 void
-Hacl_Blake2b_256_blake2b_update_key(
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-);
-
-void
-Hacl_Blake2b_256_blake2b_update_multi(
+Hacl_Hash_Blake2b_Simd256_update_multi(
   uint32_t len,
   Lib_IntVector_Intrinsics_vec256 *wv,
   Lib_IntVector_Intrinsics_vec256 *hash,
@@ -61,7 +53,7 @@ Hacl_Blake2b_256_blake2b_update_multi(
 );
 
 void
-Hacl_Blake2b_256_blake2b_update_last(
+Hacl_Hash_Blake2b_Simd256_update_last(
   uint32_t len,
   Lib_IntVector_Intrinsics_vec256 *wv,
   Lib_IntVector_Intrinsics_vec256 *hash,
@@ -71,49 +63,29 @@ Hacl_Blake2b_256_blake2b_update_last(
 );
 
 void
-Hacl_Blake2b_256_blake2b_finish(
+Hacl_Hash_Blake2b_Simd256_finish(
   uint32_t nn,
   uint8_t *output,
   Lib_IntVector_Intrinsics_vec256 *hash
 );
 
-/**
-Write the BLAKE2b digest of message `d` using key `k` into `output`.
-
-@param nn Length of the to-be-generated digest with 1 <= `nn` <= 64.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2b_256_blake2b(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-);
-
 void
-Hacl_Blake2b_256_load_state256b_from_state32(
+Hacl_Hash_Blake2b_Simd256_load_state256b_from_state32(
   Lib_IntVector_Intrinsics_vec256 *st,
   uint64_t *st32
 );
 
 void
-Hacl_Blake2b_256_store_state256b_to_state32(
+Hacl_Hash_Blake2b_Simd256_store_state256b_to_state32(
   uint64_t *st32,
   Lib_IntVector_Intrinsics_vec256 *st
 );
 
-Lib_IntVector_Intrinsics_vec256 *Hacl_Blake2b_256_blake2b_malloc(void);
+Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_Blake2b_Simd256_malloc_with_key(void);
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Hash_Blake2b_256_H_DEFINED
+#define __internal_Hacl_Hash_Blake2b_Simd256_H_DEFINED
 #endif
diff --git a/include/msvc/internal/Hacl_Hash_Blake2s.h b/include/msvc/internal/Hacl_Hash_Blake2s.h
new file mode 100644
index 00000000..f814aa95
--- /dev/null
+++ b/include/msvc/internal/Hacl_Hash_Blake2s.h
@@ -0,0 +1,70 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#ifndef __internal_Hacl_Hash_Blake2s_H
+#define __internal_Hacl_Hash_Blake2s_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <string.h>
+#include "krml/internal/types.h"
+#include "krml/lowstar_endianness.h"
+#include "krml/internal/target.h"
+
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "../Hacl_Hash_Blake2s.h"
+
+void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn);
+
+void
+Hacl_Hash_Blake2s_update_multi(
+  uint32_t len,
+  uint32_t *wv,
+  uint32_t *hash,
+  uint64_t prev,
+  uint8_t *blocks,
+  uint32_t nb
+);
+
+void
+Hacl_Hash_Blake2s_update_last(
+  uint32_t len,
+  uint32_t *wv,
+  uint32_t *hash,
+  uint64_t prev,
+  uint32_t rem,
+  uint8_t *d
+);
+
+void Hacl_Hash_Blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#define __internal_Hacl_Hash_Blake2s_H_DEFINED
+#endif
diff --git a/include/Hacl_Hash_Blake2s_128.h b/include/msvc/internal/Hacl_Hash_Blake2s_Simd128.h
similarity index 61%
rename from include/Hacl_Hash_Blake2s_128.h
rename to include/msvc/internal/Hacl_Hash_Blake2s_Simd128.h
index 2af827cd..0589aec5 100644
--- a/include/Hacl_Hash_Blake2s_128.h
+++ b/include/msvc/internal/Hacl_Hash_Blake2s_Simd128.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Hash_Blake2s_128_H
-#define __Hacl_Hash_Blake2s_128_H
+#ifndef __internal_Hacl_Hash_Blake2s_Simd128_H
+#define __internal_Hacl_Hash_Blake2s_Simd128_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,22 +35,15 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "../Hacl_Hash_Blake2s_Simd128.h"
 #include "libintvector.h"
 
 void
-Hacl_Blake2s_128_blake2s_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn);
+Hacl_Hash_Blake2s_Simd128_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn);
 
 void
-Hacl_Blake2s_128_blake2s_update_key(
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-);
-
-void
-Hacl_Blake2s_128_blake2s_update_multi(
+Hacl_Hash_Blake2s_Simd128_update_multi(
   uint32_t len,
   Lib_IntVector_Intrinsics_vec128 *wv,
   Lib_IntVector_Intrinsics_vec128 *hash,
@@ -60,7 +53,7 @@ Hacl_Blake2s_128_blake2s_update_multi(
 );
 
 void
-Hacl_Blake2s_128_blake2s_update_last(
+Hacl_Hash_Blake2s_Simd128_update_last(
   uint32_t len,
   Lib_IntVector_Intrinsics_vec128 *wv,
   Lib_IntVector_Intrinsics_vec128 *hash,
@@ -70,49 +63,29 @@ Hacl_Blake2s_128_blake2s_update_last(
 );
 
 void
-Hacl_Blake2s_128_blake2s_finish(
+Hacl_Hash_Blake2s_Simd128_finish(
   uint32_t nn,
   uint8_t *output,
   Lib_IntVector_Intrinsics_vec128 *hash
 );
 
-/**
-Write the BLAKE2s digest of message `d` using key `k` into `output`.
-
-@param nn Length of to-be-generated digest with 1 <= `nn` <= 32.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2s_128_blake2s(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-);
-
 void
-Hacl_Blake2s_128_store_state128s_to_state32(
+Hacl_Hash_Blake2s_Simd128_store_state128s_to_state32(
   uint32_t *st32,
   Lib_IntVector_Intrinsics_vec128 *st
 );
 
 void
-Hacl_Blake2s_128_load_state128s_from_state32(
+Hacl_Hash_Blake2s_Simd128_load_state128s_from_state32(
   Lib_IntVector_Intrinsics_vec128 *st,
   uint32_t *st32
 );
 
-Lib_IntVector_Intrinsics_vec128 *Hacl_Blake2s_128_blake2s_malloc(void);
+Lib_IntVector_Intrinsics_vec128 *Hacl_Hash_Blake2s_Simd128_malloc_with_key(void);
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Hash_Blake2s_128_H_DEFINED
+#define __internal_Hacl_Hash_Blake2s_Simd128_H_DEFINED
 #endif
diff --git a/include/msvc/internal/Hacl_Hash_MD5.h b/include/msvc/internal/Hacl_Hash_MD5.h
index 7fd567f3..dd77aaf1 100644
--- a/include/msvc/internal/Hacl_Hash_MD5.h
+++ b/include/msvc/internal/Hacl_Hash_MD5.h
@@ -37,21 +37,16 @@ extern "C" {
 
 #include "../Hacl_Hash_MD5.h"
 
-void Hacl_Hash_Core_MD5_legacy_init(uint32_t *s);
+void Hacl_Hash_MD5_init(uint32_t *s);
 
-void Hacl_Hash_Core_MD5_legacy_finish(uint32_t *s, uint8_t *dst);
+void Hacl_Hash_MD5_finish(uint32_t *s, uint8_t *dst);
 
-void Hacl_Hash_MD5_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
+void Hacl_Hash_MD5_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
 
 void
-Hacl_Hash_MD5_legacy_update_last(
-  uint32_t *s,
-  uint64_t prev_len,
-  uint8_t *input,
-  uint32_t input_len
-);
-
-void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+Hacl_Hash_MD5_update_last(uint32_t *s, uint64_t prev_len, uint8_t *input, uint32_t input_len);
+
+void Hacl_Hash_MD5_hash_oneshot(uint8_t *output, uint8_t *input, uint32_t input_len);
 
 #if defined(__cplusplus)
 }
diff --git a/include/msvc/internal/Hacl_Hash_SHA1.h b/include/msvc/internal/Hacl_Hash_SHA1.h
index 72cf492c..ed53be55 100644
--- a/include/msvc/internal/Hacl_Hash_SHA1.h
+++ b/include/msvc/internal/Hacl_Hash_SHA1.h
@@ -37,21 +37,16 @@ extern "C" {
 
 #include "../Hacl_Hash_SHA1.h"
 
-void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s);
+void Hacl_Hash_SHA1_init(uint32_t *s);
 
-void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst);
+void Hacl_Hash_SHA1_finish(uint32_t *s, uint8_t *dst);
 
-void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
+void Hacl_Hash_SHA1_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks);
 
 void
-Hacl_Hash_SHA1_legacy_update_last(
-  uint32_t *s,
-  uint64_t prev_len,
-  uint8_t *input,
-  uint32_t input_len
-);
-
-void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst);
+Hacl_Hash_SHA1_update_last(uint32_t *s, uint64_t prev_len, uint8_t *input, uint32_t input_len);
+
+void Hacl_Hash_SHA1_hash_oneshot(uint8_t *output, uint8_t *input, uint32_t input_len);
 
 #if defined(__cplusplus)
 }
diff --git a/include/msvc/internal/Hacl_Hash_SHA2.h b/include/msvc/internal/Hacl_Hash_SHA2.h
index bbffdc50..7dade3f3 100644
--- a/include/msvc/internal/Hacl_Hash_SHA2.h
+++ b/include/msvc/internal/Hacl_Hash_SHA2.h
@@ -40,141 +40,121 @@ extern "C" {
 
 static const
 uint32_t
-Hacl_Impl_SHA2_Generic_h224[8U] =
+Hacl_Hash_SHA2_h224[8U] =
   {
-    (uint32_t)0xc1059ed8U, (uint32_t)0x367cd507U, (uint32_t)0x3070dd17U, (uint32_t)0xf70e5939U,
-    (uint32_t)0xffc00b31U, (uint32_t)0x68581511U, (uint32_t)0x64f98fa7U, (uint32_t)0xbefa4fa4U
+    0xc1059ed8U, 0x367cd507U, 0x3070dd17U, 0xf70e5939U, 0xffc00b31U, 0x68581511U, 0x64f98fa7U,
+    0xbefa4fa4U
   };
 
 static const
 uint32_t
-Hacl_Impl_SHA2_Generic_h256[8U] =
+Hacl_Hash_SHA2_h256[8U] =
   {
-    (uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, (uint32_t)0xa54ff53aU,
-    (uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U
+    0x6a09e667U, 0xbb67ae85U, 0x3c6ef372U, 0xa54ff53aU, 0x510e527fU, 0x9b05688cU, 0x1f83d9abU,
+    0x5be0cd19U
   };
 
 static const
 uint64_t
-Hacl_Impl_SHA2_Generic_h384[8U] =
+Hacl_Hash_SHA2_h384[8U] =
   {
-    (uint64_t)0xcbbb9d5dc1059ed8U, (uint64_t)0x629a292a367cd507U, (uint64_t)0x9159015a3070dd17U,
-    (uint64_t)0x152fecd8f70e5939U, (uint64_t)0x67332667ffc00b31U, (uint64_t)0x8eb44a8768581511U,
-    (uint64_t)0xdb0c2e0d64f98fa7U, (uint64_t)0x47b5481dbefa4fa4U
+    0xcbbb9d5dc1059ed8ULL, 0x629a292a367cd507ULL, 0x9159015a3070dd17ULL, 0x152fecd8f70e5939ULL,
+    0x67332667ffc00b31ULL, 0x8eb44a8768581511ULL, 0xdb0c2e0d64f98fa7ULL, 0x47b5481dbefa4fa4ULL
   };
 
 static const
 uint64_t
-Hacl_Impl_SHA2_Generic_h512[8U] =
+Hacl_Hash_SHA2_h512[8U] =
   {
-    (uint64_t)0x6a09e667f3bcc908U, (uint64_t)0xbb67ae8584caa73bU, (uint64_t)0x3c6ef372fe94f82bU,
-    (uint64_t)0xa54ff53a5f1d36f1U, (uint64_t)0x510e527fade682d1U, (uint64_t)0x9b05688c2b3e6c1fU,
-    (uint64_t)0x1f83d9abfb41bd6bU, (uint64_t)0x5be0cd19137e2179U
+    0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL, 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
+    0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL, 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL
   };
 
 static const
 uint32_t
-Hacl_Impl_SHA2_Generic_k224_256[64U] =
+Hacl_Hash_SHA2_k224_256[64U] =
   {
-    (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U,
-    (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U,
-    (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U,
-    (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U,
-    (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU,
-    (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU,
-    (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U,
-    (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U,
-    (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U,
-    (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U,
-    (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U,
-    (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U,
-    (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U,
-    (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U,
-    (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U,
-    (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U
+    0x428a2f98U, 0x71374491U, 0xb5c0fbcfU, 0xe9b5dba5U, 0x3956c25bU, 0x59f111f1U, 0x923f82a4U,
+    0xab1c5ed5U, 0xd807aa98U, 0x12835b01U, 0x243185beU, 0x550c7dc3U, 0x72be5d74U, 0x80deb1feU,
+    0x9bdc06a7U, 0xc19bf174U, 0xe49b69c1U, 0xefbe4786U, 0x0fc19dc6U, 0x240ca1ccU, 0x2de92c6fU,
+    0x4a7484aaU, 0x5cb0a9dcU, 0x76f988daU, 0x983e5152U, 0xa831c66dU, 0xb00327c8U, 0xbf597fc7U,
+    0xc6e00bf3U, 0xd5a79147U, 0x06ca6351U, 0x14292967U, 0x27b70a85U, 0x2e1b2138U, 0x4d2c6dfcU,
+    0x53380d13U, 0x650a7354U, 0x766a0abbU, 0x81c2c92eU, 0x92722c85U, 0xa2bfe8a1U, 0xa81a664bU,
+    0xc24b8b70U, 0xc76c51a3U, 0xd192e819U, 0xd6990624U, 0xf40e3585U, 0x106aa070U, 0x19a4c116U,
+    0x1e376c08U, 0x2748774cU, 0x34b0bcb5U, 0x391c0cb3U, 0x4ed8aa4aU, 0x5b9cca4fU, 0x682e6ff3U,
+    0x748f82eeU, 0x78a5636fU, 0x84c87814U, 0x8cc70208U, 0x90befffaU, 0xa4506cebU, 0xbef9a3f7U,
+    0xc67178f2U
   };
 
 static const
 uint64_t
-Hacl_Impl_SHA2_Generic_k384_512[80U] =
+Hacl_Hash_SHA2_k384_512[80U] =
   {
-    (uint64_t)0x428a2f98d728ae22U, (uint64_t)0x7137449123ef65cdU, (uint64_t)0xb5c0fbcfec4d3b2fU,
-    (uint64_t)0xe9b5dba58189dbbcU, (uint64_t)0x3956c25bf348b538U, (uint64_t)0x59f111f1b605d019U,
-    (uint64_t)0x923f82a4af194f9bU, (uint64_t)0xab1c5ed5da6d8118U, (uint64_t)0xd807aa98a3030242U,
-    (uint64_t)0x12835b0145706fbeU, (uint64_t)0x243185be4ee4b28cU, (uint64_t)0x550c7dc3d5ffb4e2U,
-    (uint64_t)0x72be5d74f27b896fU, (uint64_t)0x80deb1fe3b1696b1U, (uint64_t)0x9bdc06a725c71235U,
-    (uint64_t)0xc19bf174cf692694U, (uint64_t)0xe49b69c19ef14ad2U, (uint64_t)0xefbe4786384f25e3U,
-    (uint64_t)0x0fc19dc68b8cd5b5U, (uint64_t)0x240ca1cc77ac9c65U, (uint64_t)0x2de92c6f592b0275U,
-    (uint64_t)0x4a7484aa6ea6e483U, (uint64_t)0x5cb0a9dcbd41fbd4U, (uint64_t)0x76f988da831153b5U,
-    (uint64_t)0x983e5152ee66dfabU, (uint64_t)0xa831c66d2db43210U, (uint64_t)0xb00327c898fb213fU,
-    (uint64_t)0xbf597fc7beef0ee4U, (uint64_t)0xc6e00bf33da88fc2U, (uint64_t)0xd5a79147930aa725U,
-    (uint64_t)0x06ca6351e003826fU, (uint64_t)0x142929670a0e6e70U, (uint64_t)0x27b70a8546d22ffcU,
-    (uint64_t)0x2e1b21385c26c926U, (uint64_t)0x4d2c6dfc5ac42aedU, (uint64_t)0x53380d139d95b3dfU,
-    (uint64_t)0x650a73548baf63deU, (uint64_t)0x766a0abb3c77b2a8U, (uint64_t)0x81c2c92e47edaee6U,
-    (uint64_t)0x92722c851482353bU, (uint64_t)0xa2bfe8a14cf10364U, (uint64_t)0xa81a664bbc423001U,
-    (uint64_t)0xc24b8b70d0f89791U, (uint64_t)0xc76c51a30654be30U, (uint64_t)0xd192e819d6ef5218U,
-    (uint64_t)0xd69906245565a910U, (uint64_t)0xf40e35855771202aU, (uint64_t)0x106aa07032bbd1b8U,
-    (uint64_t)0x19a4c116b8d2d0c8U, (uint64_t)0x1e376c085141ab53U, (uint64_t)0x2748774cdf8eeb99U,
-    (uint64_t)0x34b0bcb5e19b48a8U, (uint64_t)0x391c0cb3c5c95a63U, (uint64_t)0x4ed8aa4ae3418acbU,
-    (uint64_t)0x5b9cca4f7763e373U, (uint64_t)0x682e6ff3d6b2b8a3U, (uint64_t)0x748f82ee5defb2fcU,
-    (uint64_t)0x78a5636f43172f60U, (uint64_t)0x84c87814a1f0ab72U, (uint64_t)0x8cc702081a6439ecU,
-    (uint64_t)0x90befffa23631e28U, (uint64_t)0xa4506cebde82bde9U, (uint64_t)0xbef9a3f7b2c67915U,
-    (uint64_t)0xc67178f2e372532bU, (uint64_t)0xca273eceea26619cU, (uint64_t)0xd186b8c721c0c207U,
-    (uint64_t)0xeada7dd6cde0eb1eU, (uint64_t)0xf57d4f7fee6ed178U, (uint64_t)0x06f067aa72176fbaU,
-    (uint64_t)0x0a637dc5a2c898a6U, (uint64_t)0x113f9804bef90daeU, (uint64_t)0x1b710b35131c471bU,
-    (uint64_t)0x28db77f523047d84U, (uint64_t)0x32caab7b40c72493U, (uint64_t)0x3c9ebe0a15c9bebcU,
-    (uint64_t)0x431d67c49c100d4cU, (uint64_t)0x4cc5d4becb3e42b6U, (uint64_t)0x597f299cfc657e2aU,
-    (uint64_t)0x5fcb6fab3ad6faecU, (uint64_t)0x6c44198c4a475817U
+    0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
+    0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
+    0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
+    0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
+    0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
+    0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
+    0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
+    0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
+    0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
+    0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
+    0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
+    0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
+    0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
+    0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
+    0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
+    0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
+    0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
+    0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
+    0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
+    0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
   };
 
-void Hacl_SHA2_Scalar32_sha256_init(uint32_t *hash);
+void Hacl_Hash_SHA2_sha256_init(uint32_t *hash);
 
-void Hacl_SHA2_Scalar32_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st);
+void Hacl_Hash_SHA2_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st);
 
 void
-Hacl_SHA2_Scalar32_sha256_update_last(
-  uint64_t totlen,
-  uint32_t len,
-  uint8_t *b,
-  uint32_t *hash
-);
+Hacl_Hash_SHA2_sha256_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *hash);
 
-void Hacl_SHA2_Scalar32_sha256_finish(uint32_t *st, uint8_t *h);
+void Hacl_Hash_SHA2_sha256_finish(uint32_t *st, uint8_t *h);
 
-void Hacl_SHA2_Scalar32_sha224_init(uint32_t *hash);
+void Hacl_Hash_SHA2_sha224_init(uint32_t *hash);
 
 void
-Hacl_SHA2_Scalar32_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st);
+Hacl_Hash_SHA2_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st);
 
-void Hacl_SHA2_Scalar32_sha224_finish(uint32_t *st, uint8_t *h);
+void Hacl_Hash_SHA2_sha224_finish(uint32_t *st, uint8_t *h);
 
-void Hacl_SHA2_Scalar32_sha512_init(uint64_t *hash);
+void Hacl_Hash_SHA2_sha512_init(uint64_t *hash);
 
-void Hacl_SHA2_Scalar32_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
+void Hacl_Hash_SHA2_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
 
 void
-Hacl_SHA2_Scalar32_sha512_update_last(
+Hacl_Hash_SHA2_sha512_update_last(
   FStar_UInt128_uint128 totlen,
   uint32_t len,
   uint8_t *b,
   uint64_t *hash
 );
 
-void Hacl_SHA2_Scalar32_sha512_finish(uint64_t *st, uint8_t *h);
+void Hacl_Hash_SHA2_sha512_finish(uint64_t *st, uint8_t *h);
 
-void Hacl_SHA2_Scalar32_sha384_init(uint64_t *hash);
+void Hacl_Hash_SHA2_sha384_init(uint64_t *hash);
 
-void Hacl_SHA2_Scalar32_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
+void Hacl_Hash_SHA2_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st);
 
 void
-Hacl_SHA2_Scalar32_sha384_update_last(
+Hacl_Hash_SHA2_sha384_update_last(
   FStar_UInt128_uint128 totlen,
   uint32_t len,
   uint8_t *b,
   uint64_t *st
 );
 
-void Hacl_SHA2_Scalar32_sha384_finish(uint64_t *st, uint8_t *h);
+void Hacl_Hash_SHA2_sha384_finish(uint64_t *st, uint8_t *h);
 
 #if defined(__cplusplus)
 }
diff --git a/include/msvc/internal/Hacl_Hash_SHA3.h b/include/msvc/internal/Hacl_Hash_SHA3.h
index 6f53d37c..1c8129fb 100644
--- a/include/msvc/internal/Hacl_Hash_SHA3.h
+++ b/include/msvc/internal/Hacl_Hash_SHA3.h
@@ -53,9 +53,9 @@ Hacl_Hash_SHA3_update_last_sha3(
   uint32_t input_len
 );
 
-void Hacl_Impl_SHA3_state_permute(uint64_t *s);
+void Hacl_Hash_SHA3_state_permute(uint64_t *s);
 
-void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s);
+void Hacl_Hash_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s);
 
 #if defined(__cplusplus)
 }
diff --git a/include/msvc/internal/Hacl_Impl_Blake2_Constants.h b/include/msvc/internal/Hacl_Impl_Blake2_Constants.h
index 185317ba..aedc2486 100644
--- a/include/msvc/internal/Hacl_Impl_Blake2_Constants.h
+++ b/include/msvc/internal/Hacl_Impl_Blake2_Constants.h
@@ -37,52 +37,32 @@ extern "C" {
 
 static const
 uint32_t
-Hacl_Impl_Blake2_Constants_sigmaTable[160U] =
+Hacl_Hash_Blake2s_sigmaTable[160U] =
   {
-    (uint32_t)0U, (uint32_t)1U, (uint32_t)2U, (uint32_t)3U, (uint32_t)4U, (uint32_t)5U,
-    (uint32_t)6U, (uint32_t)7U, (uint32_t)8U, (uint32_t)9U, (uint32_t)10U, (uint32_t)11U,
-    (uint32_t)12U, (uint32_t)13U, (uint32_t)14U, (uint32_t)15U, (uint32_t)14U, (uint32_t)10U,
-    (uint32_t)4U, (uint32_t)8U, (uint32_t)9U, (uint32_t)15U, (uint32_t)13U, (uint32_t)6U,
-    (uint32_t)1U, (uint32_t)12U, (uint32_t)0U, (uint32_t)2U, (uint32_t)11U, (uint32_t)7U,
-    (uint32_t)5U, (uint32_t)3U, (uint32_t)11U, (uint32_t)8U, (uint32_t)12U, (uint32_t)0U,
-    (uint32_t)5U, (uint32_t)2U, (uint32_t)15U, (uint32_t)13U, (uint32_t)10U, (uint32_t)14U,
-    (uint32_t)3U, (uint32_t)6U, (uint32_t)7U, (uint32_t)1U, (uint32_t)9U, (uint32_t)4U,
-    (uint32_t)7U, (uint32_t)9U, (uint32_t)3U, (uint32_t)1U, (uint32_t)13U, (uint32_t)12U,
-    (uint32_t)11U, (uint32_t)14U, (uint32_t)2U, (uint32_t)6U, (uint32_t)5U, (uint32_t)10U,
-    (uint32_t)4U, (uint32_t)0U, (uint32_t)15U, (uint32_t)8U, (uint32_t)9U, (uint32_t)0U,
-    (uint32_t)5U, (uint32_t)7U, (uint32_t)2U, (uint32_t)4U, (uint32_t)10U, (uint32_t)15U,
-    (uint32_t)14U, (uint32_t)1U, (uint32_t)11U, (uint32_t)12U, (uint32_t)6U, (uint32_t)8U,
-    (uint32_t)3U, (uint32_t)13U, (uint32_t)2U, (uint32_t)12U, (uint32_t)6U, (uint32_t)10U,
-    (uint32_t)0U, (uint32_t)11U, (uint32_t)8U, (uint32_t)3U, (uint32_t)4U, (uint32_t)13U,
-    (uint32_t)7U, (uint32_t)5U, (uint32_t)15U, (uint32_t)14U, (uint32_t)1U, (uint32_t)9U,
-    (uint32_t)12U, (uint32_t)5U, (uint32_t)1U, (uint32_t)15U, (uint32_t)14U, (uint32_t)13U,
-    (uint32_t)4U, (uint32_t)10U, (uint32_t)0U, (uint32_t)7U, (uint32_t)6U, (uint32_t)3U,
-    (uint32_t)9U, (uint32_t)2U, (uint32_t)8U, (uint32_t)11U, (uint32_t)13U, (uint32_t)11U,
-    (uint32_t)7U, (uint32_t)14U, (uint32_t)12U, (uint32_t)1U, (uint32_t)3U, (uint32_t)9U,
-    (uint32_t)5U, (uint32_t)0U, (uint32_t)15U, (uint32_t)4U, (uint32_t)8U, (uint32_t)6U,
-    (uint32_t)2U, (uint32_t)10U, (uint32_t)6U, (uint32_t)15U, (uint32_t)14U, (uint32_t)9U,
-    (uint32_t)11U, (uint32_t)3U, (uint32_t)0U, (uint32_t)8U, (uint32_t)12U, (uint32_t)2U,
-    (uint32_t)13U, (uint32_t)7U, (uint32_t)1U, (uint32_t)4U, (uint32_t)10U, (uint32_t)5U,
-    (uint32_t)10U, (uint32_t)2U, (uint32_t)8U, (uint32_t)4U, (uint32_t)7U, (uint32_t)6U,
-    (uint32_t)1U, (uint32_t)5U, (uint32_t)15U, (uint32_t)11U, (uint32_t)9U, (uint32_t)14U,
-    (uint32_t)3U, (uint32_t)12U, (uint32_t)13U
+    0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U, 8U, 9U, 10U, 11U, 12U, 13U, 14U, 15U, 14U, 10U, 4U, 8U, 9U, 15U,
+    13U, 6U, 1U, 12U, 0U, 2U, 11U, 7U, 5U, 3U, 11U, 8U, 12U, 0U, 5U, 2U, 15U, 13U, 10U, 14U, 3U, 6U,
+    7U, 1U, 9U, 4U, 7U, 9U, 3U, 1U, 13U, 12U, 11U, 14U, 2U, 6U, 5U, 10U, 4U, 0U, 15U, 8U, 9U, 0U,
+    5U, 7U, 2U, 4U, 10U, 15U, 14U, 1U, 11U, 12U, 6U, 8U, 3U, 13U, 2U, 12U, 6U, 10U, 0U, 11U, 8U, 3U,
+    4U, 13U, 7U, 5U, 15U, 14U, 1U, 9U, 12U, 5U, 1U, 15U, 14U, 13U, 4U, 10U, 0U, 7U, 6U, 3U, 9U, 2U,
+    8U, 11U, 13U, 11U, 7U, 14U, 12U, 1U, 3U, 9U, 5U, 0U, 15U, 4U, 8U, 6U, 2U, 10U, 6U, 15U, 14U, 9U,
+    11U, 3U, 0U, 8U, 12U, 2U, 13U, 7U, 1U, 4U, 10U, 5U, 10U, 2U, 8U, 4U, 7U, 6U, 1U, 5U, 15U, 11U,
+    9U, 14U, 3U, 12U, 13U
   };
 
 static const
 uint32_t
-Hacl_Impl_Blake2_Constants_ivTable_S[8U] =
+Hacl_Hash_Blake2s_ivTable_S[8U] =
   {
-    (uint32_t)0x6A09E667U, (uint32_t)0xBB67AE85U, (uint32_t)0x3C6EF372U, (uint32_t)0xA54FF53AU,
-    (uint32_t)0x510E527FU, (uint32_t)0x9B05688CU, (uint32_t)0x1F83D9ABU, (uint32_t)0x5BE0CD19U
+    0x6A09E667U, 0xBB67AE85U, 0x3C6EF372U, 0xA54FF53AU, 0x510E527FU, 0x9B05688CU, 0x1F83D9ABU,
+    0x5BE0CD19U
   };
 
 static const
 uint64_t
-Hacl_Impl_Blake2_Constants_ivTable_B[8U] =
+Hacl_Hash_Blake2s_ivTable_B[8U] =
   {
-    (uint64_t)0x6A09E667F3BCC908U, (uint64_t)0xBB67AE8584CAA73BU, (uint64_t)0x3C6EF372FE94F82BU,
-    (uint64_t)0xA54FF53A5F1D36F1U, (uint64_t)0x510E527FADE682D1U, (uint64_t)0x9B05688C2B3E6C1FU,
-    (uint64_t)0x1F83D9ABFB41BD6BU, (uint64_t)0x5BE0CD19137E2179U
+    0x6A09E667F3BCC908ULL, 0xBB67AE8584CAA73BULL, 0x3C6EF372FE94F82BULL, 0xA54FF53A5F1D36F1ULL,
+    0x510E527FADE682D1ULL, 0x9B05688C2B3E6C1FULL, 0x1F83D9ABFB41BD6BULL, 0x5BE0CD19137E2179ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/msvc/internal/Hacl_Impl_FFDHE_Constants.h b/include/msvc/internal/Hacl_Impl_FFDHE_Constants.h
index c746c411..80cbdd52 100644
--- a/include/msvc/internal/Hacl_Impl_FFDHE_Constants.h
+++ b/include/msvc/internal/Hacl_Impl_FFDHE_Constants.h
@@ -35,528 +35,265 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-static const uint8_t Hacl_Impl_FFDHE_Constants_ffdhe_g2[1U] = { (uint8_t)0x02U };
+static const uint8_t Hacl_Impl_FFDHE_Constants_ffdhe_g2[1U] = { 0x02U };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p2048[256U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x28U,
-    (uint8_t)0x5CU, (uint8_t)0x97U, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x28U, 0x5CU,
+    0x97U, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p3072[384U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0xC6U, (uint8_t)0x2EU, (uint8_t)0x37U, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0xC6U, 0x2EU, 0x37U, 0xFFU,
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p4096[512U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU,
-    (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU,
-    (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U,
-    (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U,
-    (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U,
-    (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U,
-    (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U,
-    (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U,
-    (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU,
-    (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U,
-    (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U,
-    (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U,
-    (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U,
-    (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU,
-    (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U,
-    (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U,
-    (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U,
-    (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU,
-    (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U,
-    (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U,
-    (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU,
-    (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x65U, (uint8_t)0x5FU, (uint8_t)0x6AU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0x9EU, 0x1EU, 0xF1U, 0x6EU,
+    0x6FU, 0x52U, 0xC3U, 0x16U, 0x4DU, 0xF4U, 0xFBU, 0x79U, 0x30U, 0xE9U, 0xE4U, 0xE5U, 0x88U,
+    0x57U, 0xB6U, 0xACU, 0x7DU, 0x5FU, 0x42U, 0xD6U, 0x9FU, 0x6DU, 0x18U, 0x77U, 0x63U, 0xCFU,
+    0x1DU, 0x55U, 0x03U, 0x40U, 0x04U, 0x87U, 0xF5U, 0x5BU, 0xA5U, 0x7EU, 0x31U, 0xCCU, 0x7AU,
+    0x71U, 0x35U, 0xC8U, 0x86U, 0xEFU, 0xB4U, 0x31U, 0x8AU, 0xEDU, 0x6AU, 0x1EU, 0x01U, 0x2DU,
+    0x9EU, 0x68U, 0x32U, 0xA9U, 0x07U, 0x60U, 0x0AU, 0x91U, 0x81U, 0x30U, 0xC4U, 0x6DU, 0xC7U,
+    0x78U, 0xF9U, 0x71U, 0xADU, 0x00U, 0x38U, 0x09U, 0x29U, 0x99U, 0xA3U, 0x33U, 0xCBU, 0x8BU,
+    0x7AU, 0x1AU, 0x1DU, 0xB9U, 0x3DU, 0x71U, 0x40U, 0x00U, 0x3CU, 0x2AU, 0x4EU, 0xCEU, 0xA9U,
+    0xF9U, 0x8DU, 0x0AU, 0xCCU, 0x0AU, 0x82U, 0x91U, 0xCDU, 0xCEU, 0xC9U, 0x7DU, 0xCFU, 0x8EU,
+    0xC9U, 0xB5U, 0x5AU, 0x7FU, 0x88U, 0xA4U, 0x6BU, 0x4DU, 0xB5U, 0xA8U, 0x51U, 0xF4U, 0x41U,
+    0x82U, 0xE1U, 0xC6U, 0x8AU, 0x00U, 0x7EU, 0x5EU, 0x65U, 0x5FU, 0x6AU, 0xFFU, 0xFFU, 0xFFU,
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p6144[768U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU,
-    (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU,
-    (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U,
-    (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U,
-    (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U,
-    (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U,
-    (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U,
-    (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U,
-    (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU,
-    (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U,
-    (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U,
-    (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U,
-    (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U,
-    (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU,
-    (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U,
-    (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U,
-    (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U,
-    (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU,
-    (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U,
-    (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U,
-    (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU,
-    (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x0DU, (uint8_t)0xD9U, (uint8_t)0x02U,
-    (uint8_t)0x0BU, (uint8_t)0xFDU, (uint8_t)0x64U, (uint8_t)0xB6U, (uint8_t)0x45U, (uint8_t)0x03U,
-    (uint8_t)0x6CU, (uint8_t)0x7AU, (uint8_t)0x4EU, (uint8_t)0x67U, (uint8_t)0x7DU, (uint8_t)0x2CU,
-    (uint8_t)0x38U, (uint8_t)0x53U, (uint8_t)0x2AU, (uint8_t)0x3AU, (uint8_t)0x23U, (uint8_t)0xBAU,
-    (uint8_t)0x44U, (uint8_t)0x42U, (uint8_t)0xCAU, (uint8_t)0xF5U, (uint8_t)0x3EU, (uint8_t)0xA6U,
-    (uint8_t)0x3BU, (uint8_t)0xB4U, (uint8_t)0x54U, (uint8_t)0x32U, (uint8_t)0x9BU, (uint8_t)0x76U,
-    (uint8_t)0x24U, (uint8_t)0xC8U, (uint8_t)0x91U, (uint8_t)0x7BU, (uint8_t)0xDDU, (uint8_t)0x64U,
-    (uint8_t)0xB1U, (uint8_t)0xC0U, (uint8_t)0xFDU, (uint8_t)0x4CU, (uint8_t)0xB3U, (uint8_t)0x8EU,
-    (uint8_t)0x8CU, (uint8_t)0x33U, (uint8_t)0x4CU, (uint8_t)0x70U, (uint8_t)0x1CU, (uint8_t)0x3AU,
-    (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x06U, (uint8_t)0x57U, (uint8_t)0xFCU, (uint8_t)0xCFU,
-    (uint8_t)0xECU, (uint8_t)0x71U, (uint8_t)0x9BU, (uint8_t)0x1FU, (uint8_t)0x5CU, (uint8_t)0x3EU,
-    (uint8_t)0x4EU, (uint8_t)0x46U, (uint8_t)0x04U, (uint8_t)0x1FU, (uint8_t)0x38U, (uint8_t)0x81U,
-    (uint8_t)0x47U, (uint8_t)0xFBU, (uint8_t)0x4CU, (uint8_t)0xFDU, (uint8_t)0xB4U, (uint8_t)0x77U,
-    (uint8_t)0xA5U, (uint8_t)0x24U, (uint8_t)0x71U, (uint8_t)0xF7U, (uint8_t)0xA9U, (uint8_t)0xA9U,
-    (uint8_t)0x69U, (uint8_t)0x10U, (uint8_t)0xB8U, (uint8_t)0x55U, (uint8_t)0x32U, (uint8_t)0x2EU,
-    (uint8_t)0xDBU, (uint8_t)0x63U, (uint8_t)0x40U, (uint8_t)0xD8U, (uint8_t)0xA0U, (uint8_t)0x0EU,
-    (uint8_t)0xF0U, (uint8_t)0x92U, (uint8_t)0x35U, (uint8_t)0x05U, (uint8_t)0x11U, (uint8_t)0xE3U,
-    (uint8_t)0x0AU, (uint8_t)0xBEU, (uint8_t)0xC1U, (uint8_t)0xFFU, (uint8_t)0xF9U, (uint8_t)0xE3U,
-    (uint8_t)0xA2U, (uint8_t)0x6EU, (uint8_t)0x7FU, (uint8_t)0xB2U, (uint8_t)0x9FU, (uint8_t)0x8CU,
-    (uint8_t)0x18U, (uint8_t)0x30U, (uint8_t)0x23U, (uint8_t)0xC3U, (uint8_t)0x58U, (uint8_t)0x7EU,
-    (uint8_t)0x38U, (uint8_t)0xDAU, (uint8_t)0x00U, (uint8_t)0x77U, (uint8_t)0xD9U, (uint8_t)0xB4U,
-    (uint8_t)0x76U, (uint8_t)0x3EU, (uint8_t)0x4EU, (uint8_t)0x4BU, (uint8_t)0x94U, (uint8_t)0xB2U,
-    (uint8_t)0xBBU, (uint8_t)0xC1U, (uint8_t)0x94U, (uint8_t)0xC6U, (uint8_t)0x65U, (uint8_t)0x1EU,
-    (uint8_t)0x77U, (uint8_t)0xCAU, (uint8_t)0xF9U, (uint8_t)0x92U, (uint8_t)0xEEU, (uint8_t)0xAAU,
-    (uint8_t)0xC0U, (uint8_t)0x23U, (uint8_t)0x2AU, (uint8_t)0x28U, (uint8_t)0x1BU, (uint8_t)0xF6U,
-    (uint8_t)0xB3U, (uint8_t)0xA7U, (uint8_t)0x39U, (uint8_t)0xC1U, (uint8_t)0x22U, (uint8_t)0x61U,
-    (uint8_t)0x16U, (uint8_t)0x82U, (uint8_t)0x0AU, (uint8_t)0xE8U, (uint8_t)0xDBU, (uint8_t)0x58U,
-    (uint8_t)0x47U, (uint8_t)0xA6U, (uint8_t)0x7CU, (uint8_t)0xBEU, (uint8_t)0xF9U, (uint8_t)0xC9U,
-    (uint8_t)0x09U, (uint8_t)0x1BU, (uint8_t)0x46U, (uint8_t)0x2DU, (uint8_t)0x53U, (uint8_t)0x8CU,
-    (uint8_t)0xD7U, (uint8_t)0x2BU, (uint8_t)0x03U, (uint8_t)0x74U, (uint8_t)0x6AU, (uint8_t)0xE7U,
-    (uint8_t)0x7FU, (uint8_t)0x5EU, (uint8_t)0x62U, (uint8_t)0x29U, (uint8_t)0x2CU, (uint8_t)0x31U,
-    (uint8_t)0x15U, (uint8_t)0x62U, (uint8_t)0xA8U, (uint8_t)0x46U, (uint8_t)0x50U, (uint8_t)0x5DU,
-    (uint8_t)0xC8U, (uint8_t)0x2DU, (uint8_t)0xB8U, (uint8_t)0x54U, (uint8_t)0x33U, (uint8_t)0x8AU,
-    (uint8_t)0xE4U, (uint8_t)0x9FU, (uint8_t)0x52U, (uint8_t)0x35U, (uint8_t)0xC9U, (uint8_t)0x5BU,
-    (uint8_t)0x91U, (uint8_t)0x17U, (uint8_t)0x8CU, (uint8_t)0xCFU, (uint8_t)0x2DU, (uint8_t)0xD5U,
-    (uint8_t)0xCAU, (uint8_t)0xCEU, (uint8_t)0xF4U, (uint8_t)0x03U, (uint8_t)0xECU, (uint8_t)0x9DU,
-    (uint8_t)0x18U, (uint8_t)0x10U, (uint8_t)0xC6U, (uint8_t)0x27U, (uint8_t)0x2BU, (uint8_t)0x04U,
-    (uint8_t)0x5BU, (uint8_t)0x3BU, (uint8_t)0x71U, (uint8_t)0xF9U, (uint8_t)0xDCU, (uint8_t)0x6BU,
-    (uint8_t)0x80U, (uint8_t)0xD6U, (uint8_t)0x3FU, (uint8_t)0xDDU, (uint8_t)0x4AU, (uint8_t)0x8EU,
-    (uint8_t)0x9AU, (uint8_t)0xDBU, (uint8_t)0x1EU, (uint8_t)0x69U, (uint8_t)0x62U, (uint8_t)0xA6U,
-    (uint8_t)0x95U, (uint8_t)0x26U, (uint8_t)0xD4U, (uint8_t)0x31U, (uint8_t)0x61U, (uint8_t)0xC1U,
-    (uint8_t)0xA4U, (uint8_t)0x1DU, (uint8_t)0x57U, (uint8_t)0x0DU, (uint8_t)0x79U, (uint8_t)0x38U,
-    (uint8_t)0xDAU, (uint8_t)0xD4U, (uint8_t)0xA4U, (uint8_t)0x0EU, (uint8_t)0x32U, (uint8_t)0x9CU,
-    (uint8_t)0xD0U, (uint8_t)0xE4U, (uint8_t)0x0EU, (uint8_t)0x65U, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0x9EU, 0x1EU, 0xF1U, 0x6EU,
+    0x6FU, 0x52U, 0xC3U, 0x16U, 0x4DU, 0xF4U, 0xFBU, 0x79U, 0x30U, 0xE9U, 0xE4U, 0xE5U, 0x88U,
+    0x57U, 0xB6U, 0xACU, 0x7DU, 0x5FU, 0x42U, 0xD6U, 0x9FU, 0x6DU, 0x18U, 0x77U, 0x63U, 0xCFU,
+    0x1DU, 0x55U, 0x03U, 0x40U, 0x04U, 0x87U, 0xF5U, 0x5BU, 0xA5U, 0x7EU, 0x31U, 0xCCU, 0x7AU,
+    0x71U, 0x35U, 0xC8U, 0x86U, 0xEFU, 0xB4U, 0x31U, 0x8AU, 0xEDU, 0x6AU, 0x1EU, 0x01U, 0x2DU,
+    0x9EU, 0x68U, 0x32U, 0xA9U, 0x07U, 0x60U, 0x0AU, 0x91U, 0x81U, 0x30U, 0xC4U, 0x6DU, 0xC7U,
+    0x78U, 0xF9U, 0x71U, 0xADU, 0x00U, 0x38U, 0x09U, 0x29U, 0x99U, 0xA3U, 0x33U, 0xCBU, 0x8BU,
+    0x7AU, 0x1AU, 0x1DU, 0xB9U, 0x3DU, 0x71U, 0x40U, 0x00U, 0x3CU, 0x2AU, 0x4EU, 0xCEU, 0xA9U,
+    0xF9U, 0x8DU, 0x0AU, 0xCCU, 0x0AU, 0x82U, 0x91U, 0xCDU, 0xCEU, 0xC9U, 0x7DU, 0xCFU, 0x8EU,
+    0xC9U, 0xB5U, 0x5AU, 0x7FU, 0x88U, 0xA4U, 0x6BU, 0x4DU, 0xB5U, 0xA8U, 0x51U, 0xF4U, 0x41U,
+    0x82U, 0xE1U, 0xC6U, 0x8AU, 0x00U, 0x7EU, 0x5EU, 0x0DU, 0xD9U, 0x02U, 0x0BU, 0xFDU, 0x64U,
+    0xB6U, 0x45U, 0x03U, 0x6CU, 0x7AU, 0x4EU, 0x67U, 0x7DU, 0x2CU, 0x38U, 0x53U, 0x2AU, 0x3AU,
+    0x23U, 0xBAU, 0x44U, 0x42U, 0xCAU, 0xF5U, 0x3EU, 0xA6U, 0x3BU, 0xB4U, 0x54U, 0x32U, 0x9BU,
+    0x76U, 0x24U, 0xC8U, 0x91U, 0x7BU, 0xDDU, 0x64U, 0xB1U, 0xC0U, 0xFDU, 0x4CU, 0xB3U, 0x8EU,
+    0x8CU, 0x33U, 0x4CU, 0x70U, 0x1CU, 0x3AU, 0xCDU, 0xADU, 0x06U, 0x57U, 0xFCU, 0xCFU, 0xECU,
+    0x71U, 0x9BU, 0x1FU, 0x5CU, 0x3EU, 0x4EU, 0x46U, 0x04U, 0x1FU, 0x38U, 0x81U, 0x47U, 0xFBU,
+    0x4CU, 0xFDU, 0xB4U, 0x77U, 0xA5U, 0x24U, 0x71U, 0xF7U, 0xA9U, 0xA9U, 0x69U, 0x10U, 0xB8U,
+    0x55U, 0x32U, 0x2EU, 0xDBU, 0x63U, 0x40U, 0xD8U, 0xA0U, 0x0EU, 0xF0U, 0x92U, 0x35U, 0x05U,
+    0x11U, 0xE3U, 0x0AU, 0xBEU, 0xC1U, 0xFFU, 0xF9U, 0xE3U, 0xA2U, 0x6EU, 0x7FU, 0xB2U, 0x9FU,
+    0x8CU, 0x18U, 0x30U, 0x23U, 0xC3U, 0x58U, 0x7EU, 0x38U, 0xDAU, 0x00U, 0x77U, 0xD9U, 0xB4U,
+    0x76U, 0x3EU, 0x4EU, 0x4BU, 0x94U, 0xB2U, 0xBBU, 0xC1U, 0x94U, 0xC6U, 0x65U, 0x1EU, 0x77U,
+    0xCAU, 0xF9U, 0x92U, 0xEEU, 0xAAU, 0xC0U, 0x23U, 0x2AU, 0x28U, 0x1BU, 0xF6U, 0xB3U, 0xA7U,
+    0x39U, 0xC1U, 0x22U, 0x61U, 0x16U, 0x82U, 0x0AU, 0xE8U, 0xDBU, 0x58U, 0x47U, 0xA6U, 0x7CU,
+    0xBEU, 0xF9U, 0xC9U, 0x09U, 0x1BU, 0x46U, 0x2DU, 0x53U, 0x8CU, 0xD7U, 0x2BU, 0x03U, 0x74U,
+    0x6AU, 0xE7U, 0x7FU, 0x5EU, 0x62U, 0x29U, 0x2CU, 0x31U, 0x15U, 0x62U, 0xA8U, 0x46U, 0x50U,
+    0x5DU, 0xC8U, 0x2DU, 0xB8U, 0x54U, 0x33U, 0x8AU, 0xE4U, 0x9FU, 0x52U, 0x35U, 0xC9U, 0x5BU,
+    0x91U, 0x17U, 0x8CU, 0xCFU, 0x2DU, 0xD5U, 0xCAU, 0xCEU, 0xF4U, 0x03U, 0xECU, 0x9DU, 0x18U,
+    0x10U, 0xC6U, 0x27U, 0x2BU, 0x04U, 0x5BU, 0x3BU, 0x71U, 0xF9U, 0xDCU, 0x6BU, 0x80U, 0xD6U,
+    0x3FU, 0xDDU, 0x4AU, 0x8EU, 0x9AU, 0xDBU, 0x1EU, 0x69U, 0x62U, 0xA6U, 0x95U, 0x26U, 0xD4U,
+    0x31U, 0x61U, 0xC1U, 0xA4U, 0x1DU, 0x57U, 0x0DU, 0x79U, 0x38U, 0xDAU, 0xD4U, 0xA4U, 0x0EU,
+    0x32U, 0x9CU, 0xD0U, 0xE4U, 0x0EU, 0x65U, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU,
+    0xFFU
   };
 
 static const
 uint8_t
 Hacl_Impl_FFDHE_Constants_ffdhe_p8192[1024U] =
   {
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xADU, (uint8_t)0xF8U, (uint8_t)0x54U, (uint8_t)0x58U,
-    (uint8_t)0xA2U, (uint8_t)0xBBU, (uint8_t)0x4AU, (uint8_t)0x9AU, (uint8_t)0xAFU, (uint8_t)0xDCU,
-    (uint8_t)0x56U, (uint8_t)0x20U, (uint8_t)0x27U, (uint8_t)0x3DU, (uint8_t)0x3CU, (uint8_t)0xF1U,
-    (uint8_t)0xD8U, (uint8_t)0xB9U, (uint8_t)0xC5U, (uint8_t)0x83U, (uint8_t)0xCEU, (uint8_t)0x2DU,
-    (uint8_t)0x36U, (uint8_t)0x95U, (uint8_t)0xA9U, (uint8_t)0xE1U, (uint8_t)0x36U, (uint8_t)0x41U,
-    (uint8_t)0x14U, (uint8_t)0x64U, (uint8_t)0x33U, (uint8_t)0xFBU, (uint8_t)0xCCU, (uint8_t)0x93U,
-    (uint8_t)0x9DU, (uint8_t)0xCEU, (uint8_t)0x24U, (uint8_t)0x9BU, (uint8_t)0x3EU, (uint8_t)0xF9U,
-    (uint8_t)0x7DU, (uint8_t)0x2FU, (uint8_t)0xE3U, (uint8_t)0x63U, (uint8_t)0x63U, (uint8_t)0x0CU,
-    (uint8_t)0x75U, (uint8_t)0xD8U, (uint8_t)0xF6U, (uint8_t)0x81U, (uint8_t)0xB2U, (uint8_t)0x02U,
-    (uint8_t)0xAEU, (uint8_t)0xC4U, (uint8_t)0x61U, (uint8_t)0x7AU, (uint8_t)0xD3U, (uint8_t)0xDFU,
-    (uint8_t)0x1EU, (uint8_t)0xD5U, (uint8_t)0xD5U, (uint8_t)0xFDU, (uint8_t)0x65U, (uint8_t)0x61U,
-    (uint8_t)0x24U, (uint8_t)0x33U, (uint8_t)0xF5U, (uint8_t)0x1FU, (uint8_t)0x5FU, (uint8_t)0x06U,
-    (uint8_t)0x6EU, (uint8_t)0xD0U, (uint8_t)0x85U, (uint8_t)0x63U, (uint8_t)0x65U, (uint8_t)0x55U,
-    (uint8_t)0x3DU, (uint8_t)0xEDU, (uint8_t)0x1AU, (uint8_t)0xF3U, (uint8_t)0xB5U, (uint8_t)0x57U,
-    (uint8_t)0x13U, (uint8_t)0x5EU, (uint8_t)0x7FU, (uint8_t)0x57U, (uint8_t)0xC9U, (uint8_t)0x35U,
-    (uint8_t)0x98U, (uint8_t)0x4FU, (uint8_t)0x0CU, (uint8_t)0x70U, (uint8_t)0xE0U, (uint8_t)0xE6U,
-    (uint8_t)0x8BU, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0xA6U, (uint8_t)0x89U, (uint8_t)0xDAU,
-    (uint8_t)0xF3U, (uint8_t)0xEFU, (uint8_t)0xE8U, (uint8_t)0x72U, (uint8_t)0x1DU, (uint8_t)0xF1U,
-    (uint8_t)0x58U, (uint8_t)0xA1U, (uint8_t)0x36U, (uint8_t)0xADU, (uint8_t)0xE7U, (uint8_t)0x35U,
-    (uint8_t)0x30U, (uint8_t)0xACU, (uint8_t)0xCAU, (uint8_t)0x4FU, (uint8_t)0x48U, (uint8_t)0x3AU,
-    (uint8_t)0x79U, (uint8_t)0x7AU, (uint8_t)0xBCU, (uint8_t)0x0AU, (uint8_t)0xB1U, (uint8_t)0x82U,
-    (uint8_t)0xB3U, (uint8_t)0x24U, (uint8_t)0xFBU, (uint8_t)0x61U, (uint8_t)0xD1U, (uint8_t)0x08U,
-    (uint8_t)0xA9U, (uint8_t)0x4BU, (uint8_t)0xB2U, (uint8_t)0xC8U, (uint8_t)0xE3U, (uint8_t)0xFBU,
-    (uint8_t)0xB9U, (uint8_t)0x6AU, (uint8_t)0xDAU, (uint8_t)0xB7U, (uint8_t)0x60U, (uint8_t)0xD7U,
-    (uint8_t)0xF4U, (uint8_t)0x68U, (uint8_t)0x1DU, (uint8_t)0x4FU, (uint8_t)0x42U, (uint8_t)0xA3U,
-    (uint8_t)0xDEU, (uint8_t)0x39U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xAEU, (uint8_t)0x56U,
-    (uint8_t)0xEDU, (uint8_t)0xE7U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0xBBU, (uint8_t)0x19U,
-    (uint8_t)0x0BU, (uint8_t)0x07U, (uint8_t)0xA7U, (uint8_t)0xC8U, (uint8_t)0xEEU, (uint8_t)0x0AU,
-    (uint8_t)0x6DU, (uint8_t)0x70U, (uint8_t)0x9EU, (uint8_t)0x02U, (uint8_t)0xFCU, (uint8_t)0xE1U,
-    (uint8_t)0xCDU, (uint8_t)0xF7U, (uint8_t)0xE2U, (uint8_t)0xECU, (uint8_t)0xC0U, (uint8_t)0x34U,
-    (uint8_t)0x04U, (uint8_t)0xCDU, (uint8_t)0x28U, (uint8_t)0x34U, (uint8_t)0x2FU, (uint8_t)0x61U,
-    (uint8_t)0x91U, (uint8_t)0x72U, (uint8_t)0xFEU, (uint8_t)0x9CU, (uint8_t)0xE9U, (uint8_t)0x85U,
-    (uint8_t)0x83U, (uint8_t)0xFFU, (uint8_t)0x8EU, (uint8_t)0x4FU, (uint8_t)0x12U, (uint8_t)0x32U,
-    (uint8_t)0xEEU, (uint8_t)0xF2U, (uint8_t)0x81U, (uint8_t)0x83U, (uint8_t)0xC3U, (uint8_t)0xFEU,
-    (uint8_t)0x3BU, (uint8_t)0x1BU, (uint8_t)0x4CU, (uint8_t)0x6FU, (uint8_t)0xADU, (uint8_t)0x73U,
-    (uint8_t)0x3BU, (uint8_t)0xB5U, (uint8_t)0xFCU, (uint8_t)0xBCU, (uint8_t)0x2EU, (uint8_t)0xC2U,
-    (uint8_t)0x20U, (uint8_t)0x05U, (uint8_t)0xC5U, (uint8_t)0x8EU, (uint8_t)0xF1U, (uint8_t)0x83U,
-    (uint8_t)0x7DU, (uint8_t)0x16U, (uint8_t)0x83U, (uint8_t)0xB2U, (uint8_t)0xC6U, (uint8_t)0xF3U,
-    (uint8_t)0x4AU, (uint8_t)0x26U, (uint8_t)0xC1U, (uint8_t)0xB2U, (uint8_t)0xEFU, (uint8_t)0xFAU,
-    (uint8_t)0x88U, (uint8_t)0x6BU, (uint8_t)0x42U, (uint8_t)0x38U, (uint8_t)0x61U, (uint8_t)0x1FU,
-    (uint8_t)0xCFU, (uint8_t)0xDCU, (uint8_t)0xDEU, (uint8_t)0x35U, (uint8_t)0x5BU, (uint8_t)0x3BU,
-    (uint8_t)0x65U, (uint8_t)0x19U, (uint8_t)0x03U, (uint8_t)0x5BU, (uint8_t)0xBCU, (uint8_t)0x34U,
-    (uint8_t)0xF4U, (uint8_t)0xDEU, (uint8_t)0xF9U, (uint8_t)0x9CU, (uint8_t)0x02U, (uint8_t)0x38U,
-    (uint8_t)0x61U, (uint8_t)0xB4U, (uint8_t)0x6FU, (uint8_t)0xC9U, (uint8_t)0xD6U, (uint8_t)0xE6U,
-    (uint8_t)0xC9U, (uint8_t)0x07U, (uint8_t)0x7AU, (uint8_t)0xD9U, (uint8_t)0x1DU, (uint8_t)0x26U,
-    (uint8_t)0x91U, (uint8_t)0xF7U, (uint8_t)0xF7U, (uint8_t)0xEEU, (uint8_t)0x59U, (uint8_t)0x8CU,
-    (uint8_t)0xB0U, (uint8_t)0xFAU, (uint8_t)0xC1U, (uint8_t)0x86U, (uint8_t)0xD9U, (uint8_t)0x1CU,
-    (uint8_t)0xAEU, (uint8_t)0xFEU, (uint8_t)0x13U, (uint8_t)0x09U, (uint8_t)0x85U, (uint8_t)0x13U,
-    (uint8_t)0x92U, (uint8_t)0x70U, (uint8_t)0xB4U, (uint8_t)0x13U, (uint8_t)0x0CU, (uint8_t)0x93U,
-    (uint8_t)0xBCU, (uint8_t)0x43U, (uint8_t)0x79U, (uint8_t)0x44U, (uint8_t)0xF4U, (uint8_t)0xFDU,
-    (uint8_t)0x44U, (uint8_t)0x52U, (uint8_t)0xE2U, (uint8_t)0xD7U, (uint8_t)0x4DU, (uint8_t)0xD3U,
-    (uint8_t)0x64U, (uint8_t)0xF2U, (uint8_t)0xE2U, (uint8_t)0x1EU, (uint8_t)0x71U, (uint8_t)0xF5U,
-    (uint8_t)0x4BU, (uint8_t)0xFFU, (uint8_t)0x5CU, (uint8_t)0xAEU, (uint8_t)0x82U, (uint8_t)0xABU,
-    (uint8_t)0x9CU, (uint8_t)0x9DU, (uint8_t)0xF6U, (uint8_t)0x9EU, (uint8_t)0xE8U, (uint8_t)0x6DU,
-    (uint8_t)0x2BU, (uint8_t)0xC5U, (uint8_t)0x22U, (uint8_t)0x36U, (uint8_t)0x3AU, (uint8_t)0x0DU,
-    (uint8_t)0xABU, (uint8_t)0xC5U, (uint8_t)0x21U, (uint8_t)0x97U, (uint8_t)0x9BU, (uint8_t)0x0DU,
-    (uint8_t)0xEAU, (uint8_t)0xDAU, (uint8_t)0x1DU, (uint8_t)0xBFU, (uint8_t)0x9AU, (uint8_t)0x42U,
-    (uint8_t)0xD5U, (uint8_t)0xC4U, (uint8_t)0x48U, (uint8_t)0x4EU, (uint8_t)0x0AU, (uint8_t)0xBCU,
-    (uint8_t)0xD0U, (uint8_t)0x6BU, (uint8_t)0xFAU, (uint8_t)0x53U, (uint8_t)0xDDU, (uint8_t)0xEFU,
-    (uint8_t)0x3CU, (uint8_t)0x1BU, (uint8_t)0x20U, (uint8_t)0xEEU, (uint8_t)0x3FU, (uint8_t)0xD5U,
-    (uint8_t)0x9DU, (uint8_t)0x7CU, (uint8_t)0x25U, (uint8_t)0xE4U, (uint8_t)0x1DU, (uint8_t)0x2BU,
-    (uint8_t)0x66U, (uint8_t)0x9EU, (uint8_t)0x1EU, (uint8_t)0xF1U, (uint8_t)0x6EU, (uint8_t)0x6FU,
-    (uint8_t)0x52U, (uint8_t)0xC3U, (uint8_t)0x16U, (uint8_t)0x4DU, (uint8_t)0xF4U, (uint8_t)0xFBU,
-    (uint8_t)0x79U, (uint8_t)0x30U, (uint8_t)0xE9U, (uint8_t)0xE4U, (uint8_t)0xE5U, (uint8_t)0x88U,
-    (uint8_t)0x57U, (uint8_t)0xB6U, (uint8_t)0xACU, (uint8_t)0x7DU, (uint8_t)0x5FU, (uint8_t)0x42U,
-    (uint8_t)0xD6U, (uint8_t)0x9FU, (uint8_t)0x6DU, (uint8_t)0x18U, (uint8_t)0x77U, (uint8_t)0x63U,
-    (uint8_t)0xCFU, (uint8_t)0x1DU, (uint8_t)0x55U, (uint8_t)0x03U, (uint8_t)0x40U, (uint8_t)0x04U,
-    (uint8_t)0x87U, (uint8_t)0xF5U, (uint8_t)0x5BU, (uint8_t)0xA5U, (uint8_t)0x7EU, (uint8_t)0x31U,
-    (uint8_t)0xCCU, (uint8_t)0x7AU, (uint8_t)0x71U, (uint8_t)0x35U, (uint8_t)0xC8U, (uint8_t)0x86U,
-    (uint8_t)0xEFU, (uint8_t)0xB4U, (uint8_t)0x31U, (uint8_t)0x8AU, (uint8_t)0xEDU, (uint8_t)0x6AU,
-    (uint8_t)0x1EU, (uint8_t)0x01U, (uint8_t)0x2DU, (uint8_t)0x9EU, (uint8_t)0x68U, (uint8_t)0x32U,
-    (uint8_t)0xA9U, (uint8_t)0x07U, (uint8_t)0x60U, (uint8_t)0x0AU, (uint8_t)0x91U, (uint8_t)0x81U,
-    (uint8_t)0x30U, (uint8_t)0xC4U, (uint8_t)0x6DU, (uint8_t)0xC7U, (uint8_t)0x78U, (uint8_t)0xF9U,
-    (uint8_t)0x71U, (uint8_t)0xADU, (uint8_t)0x00U, (uint8_t)0x38U, (uint8_t)0x09U, (uint8_t)0x29U,
-    (uint8_t)0x99U, (uint8_t)0xA3U, (uint8_t)0x33U, (uint8_t)0xCBU, (uint8_t)0x8BU, (uint8_t)0x7AU,
-    (uint8_t)0x1AU, (uint8_t)0x1DU, (uint8_t)0xB9U, (uint8_t)0x3DU, (uint8_t)0x71U, (uint8_t)0x40U,
-    (uint8_t)0x00U, (uint8_t)0x3CU, (uint8_t)0x2AU, (uint8_t)0x4EU, (uint8_t)0xCEU, (uint8_t)0xA9U,
-    (uint8_t)0xF9U, (uint8_t)0x8DU, (uint8_t)0x0AU, (uint8_t)0xCCU, (uint8_t)0x0AU, (uint8_t)0x82U,
-    (uint8_t)0x91U, (uint8_t)0xCDU, (uint8_t)0xCEU, (uint8_t)0xC9U, (uint8_t)0x7DU, (uint8_t)0xCFU,
-    (uint8_t)0x8EU, (uint8_t)0xC9U, (uint8_t)0xB5U, (uint8_t)0x5AU, (uint8_t)0x7FU, (uint8_t)0x88U,
-    (uint8_t)0xA4U, (uint8_t)0x6BU, (uint8_t)0x4DU, (uint8_t)0xB5U, (uint8_t)0xA8U, (uint8_t)0x51U,
-    (uint8_t)0xF4U, (uint8_t)0x41U, (uint8_t)0x82U, (uint8_t)0xE1U, (uint8_t)0xC6U, (uint8_t)0x8AU,
-    (uint8_t)0x00U, (uint8_t)0x7EU, (uint8_t)0x5EU, (uint8_t)0x0DU, (uint8_t)0xD9U, (uint8_t)0x02U,
-    (uint8_t)0x0BU, (uint8_t)0xFDU, (uint8_t)0x64U, (uint8_t)0xB6U, (uint8_t)0x45U, (uint8_t)0x03U,
-    (uint8_t)0x6CU, (uint8_t)0x7AU, (uint8_t)0x4EU, (uint8_t)0x67U, (uint8_t)0x7DU, (uint8_t)0x2CU,
-    (uint8_t)0x38U, (uint8_t)0x53U, (uint8_t)0x2AU, (uint8_t)0x3AU, (uint8_t)0x23U, (uint8_t)0xBAU,
-    (uint8_t)0x44U, (uint8_t)0x42U, (uint8_t)0xCAU, (uint8_t)0xF5U, (uint8_t)0x3EU, (uint8_t)0xA6U,
-    (uint8_t)0x3BU, (uint8_t)0xB4U, (uint8_t)0x54U, (uint8_t)0x32U, (uint8_t)0x9BU, (uint8_t)0x76U,
-    (uint8_t)0x24U, (uint8_t)0xC8U, (uint8_t)0x91U, (uint8_t)0x7BU, (uint8_t)0xDDU, (uint8_t)0x64U,
-    (uint8_t)0xB1U, (uint8_t)0xC0U, (uint8_t)0xFDU, (uint8_t)0x4CU, (uint8_t)0xB3U, (uint8_t)0x8EU,
-    (uint8_t)0x8CU, (uint8_t)0x33U, (uint8_t)0x4CU, (uint8_t)0x70U, (uint8_t)0x1CU, (uint8_t)0x3AU,
-    (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x06U, (uint8_t)0x57U, (uint8_t)0xFCU, (uint8_t)0xCFU,
-    (uint8_t)0xECU, (uint8_t)0x71U, (uint8_t)0x9BU, (uint8_t)0x1FU, (uint8_t)0x5CU, (uint8_t)0x3EU,
-    (uint8_t)0x4EU, (uint8_t)0x46U, (uint8_t)0x04U, (uint8_t)0x1FU, (uint8_t)0x38U, (uint8_t)0x81U,
-    (uint8_t)0x47U, (uint8_t)0xFBU, (uint8_t)0x4CU, (uint8_t)0xFDU, (uint8_t)0xB4U, (uint8_t)0x77U,
-    (uint8_t)0xA5U, (uint8_t)0x24U, (uint8_t)0x71U, (uint8_t)0xF7U, (uint8_t)0xA9U, (uint8_t)0xA9U,
-    (uint8_t)0x69U, (uint8_t)0x10U, (uint8_t)0xB8U, (uint8_t)0x55U, (uint8_t)0x32U, (uint8_t)0x2EU,
-    (uint8_t)0xDBU, (uint8_t)0x63U, (uint8_t)0x40U, (uint8_t)0xD8U, (uint8_t)0xA0U, (uint8_t)0x0EU,
-    (uint8_t)0xF0U, (uint8_t)0x92U, (uint8_t)0x35U, (uint8_t)0x05U, (uint8_t)0x11U, (uint8_t)0xE3U,
-    (uint8_t)0x0AU, (uint8_t)0xBEU, (uint8_t)0xC1U, (uint8_t)0xFFU, (uint8_t)0xF9U, (uint8_t)0xE3U,
-    (uint8_t)0xA2U, (uint8_t)0x6EU, (uint8_t)0x7FU, (uint8_t)0xB2U, (uint8_t)0x9FU, (uint8_t)0x8CU,
-    (uint8_t)0x18U, (uint8_t)0x30U, (uint8_t)0x23U, (uint8_t)0xC3U, (uint8_t)0x58U, (uint8_t)0x7EU,
-    (uint8_t)0x38U, (uint8_t)0xDAU, (uint8_t)0x00U, (uint8_t)0x77U, (uint8_t)0xD9U, (uint8_t)0xB4U,
-    (uint8_t)0x76U, (uint8_t)0x3EU, (uint8_t)0x4EU, (uint8_t)0x4BU, (uint8_t)0x94U, (uint8_t)0xB2U,
-    (uint8_t)0xBBU, (uint8_t)0xC1U, (uint8_t)0x94U, (uint8_t)0xC6U, (uint8_t)0x65U, (uint8_t)0x1EU,
-    (uint8_t)0x77U, (uint8_t)0xCAU, (uint8_t)0xF9U, (uint8_t)0x92U, (uint8_t)0xEEU, (uint8_t)0xAAU,
-    (uint8_t)0xC0U, (uint8_t)0x23U, (uint8_t)0x2AU, (uint8_t)0x28U, (uint8_t)0x1BU, (uint8_t)0xF6U,
-    (uint8_t)0xB3U, (uint8_t)0xA7U, (uint8_t)0x39U, (uint8_t)0xC1U, (uint8_t)0x22U, (uint8_t)0x61U,
-    (uint8_t)0x16U, (uint8_t)0x82U, (uint8_t)0x0AU, (uint8_t)0xE8U, (uint8_t)0xDBU, (uint8_t)0x58U,
-    (uint8_t)0x47U, (uint8_t)0xA6U, (uint8_t)0x7CU, (uint8_t)0xBEU, (uint8_t)0xF9U, (uint8_t)0xC9U,
-    (uint8_t)0x09U, (uint8_t)0x1BU, (uint8_t)0x46U, (uint8_t)0x2DU, (uint8_t)0x53U, (uint8_t)0x8CU,
-    (uint8_t)0xD7U, (uint8_t)0x2BU, (uint8_t)0x03U, (uint8_t)0x74U, (uint8_t)0x6AU, (uint8_t)0xE7U,
-    (uint8_t)0x7FU, (uint8_t)0x5EU, (uint8_t)0x62U, (uint8_t)0x29U, (uint8_t)0x2CU, (uint8_t)0x31U,
-    (uint8_t)0x15U, (uint8_t)0x62U, (uint8_t)0xA8U, (uint8_t)0x46U, (uint8_t)0x50U, (uint8_t)0x5DU,
-    (uint8_t)0xC8U, (uint8_t)0x2DU, (uint8_t)0xB8U, (uint8_t)0x54U, (uint8_t)0x33U, (uint8_t)0x8AU,
-    (uint8_t)0xE4U, (uint8_t)0x9FU, (uint8_t)0x52U, (uint8_t)0x35U, (uint8_t)0xC9U, (uint8_t)0x5BU,
-    (uint8_t)0x91U, (uint8_t)0x17U, (uint8_t)0x8CU, (uint8_t)0xCFU, (uint8_t)0x2DU, (uint8_t)0xD5U,
-    (uint8_t)0xCAU, (uint8_t)0xCEU, (uint8_t)0xF4U, (uint8_t)0x03U, (uint8_t)0xECU, (uint8_t)0x9DU,
-    (uint8_t)0x18U, (uint8_t)0x10U, (uint8_t)0xC6U, (uint8_t)0x27U, (uint8_t)0x2BU, (uint8_t)0x04U,
-    (uint8_t)0x5BU, (uint8_t)0x3BU, (uint8_t)0x71U, (uint8_t)0xF9U, (uint8_t)0xDCU, (uint8_t)0x6BU,
-    (uint8_t)0x80U, (uint8_t)0xD6U, (uint8_t)0x3FU, (uint8_t)0xDDU, (uint8_t)0x4AU, (uint8_t)0x8EU,
-    (uint8_t)0x9AU, (uint8_t)0xDBU, (uint8_t)0x1EU, (uint8_t)0x69U, (uint8_t)0x62U, (uint8_t)0xA6U,
-    (uint8_t)0x95U, (uint8_t)0x26U, (uint8_t)0xD4U, (uint8_t)0x31U, (uint8_t)0x61U, (uint8_t)0xC1U,
-    (uint8_t)0xA4U, (uint8_t)0x1DU, (uint8_t)0x57U, (uint8_t)0x0DU, (uint8_t)0x79U, (uint8_t)0x38U,
-    (uint8_t)0xDAU, (uint8_t)0xD4U, (uint8_t)0xA4U, (uint8_t)0x0EU, (uint8_t)0x32U, (uint8_t)0x9CU,
-    (uint8_t)0xCFU, (uint8_t)0xF4U, (uint8_t)0x6AU, (uint8_t)0xAAU, (uint8_t)0x36U, (uint8_t)0xADU,
-    (uint8_t)0x00U, (uint8_t)0x4CU, (uint8_t)0xF6U, (uint8_t)0x00U, (uint8_t)0xC8U, (uint8_t)0x38U,
-    (uint8_t)0x1EU, (uint8_t)0x42U, (uint8_t)0x5AU, (uint8_t)0x31U, (uint8_t)0xD9U, (uint8_t)0x51U,
-    (uint8_t)0xAEU, (uint8_t)0x64U, (uint8_t)0xFDU, (uint8_t)0xB2U, (uint8_t)0x3FU, (uint8_t)0xCEU,
-    (uint8_t)0xC9U, (uint8_t)0x50U, (uint8_t)0x9DU, (uint8_t)0x43U, (uint8_t)0x68U, (uint8_t)0x7FU,
-    (uint8_t)0xEBU, (uint8_t)0x69U, (uint8_t)0xEDU, (uint8_t)0xD1U, (uint8_t)0xCCU, (uint8_t)0x5EU,
-    (uint8_t)0x0BU, (uint8_t)0x8CU, (uint8_t)0xC3U, (uint8_t)0xBDU, (uint8_t)0xF6U, (uint8_t)0x4BU,
-    (uint8_t)0x10U, (uint8_t)0xEFU, (uint8_t)0x86U, (uint8_t)0xB6U, (uint8_t)0x31U, (uint8_t)0x42U,
-    (uint8_t)0xA3U, (uint8_t)0xABU, (uint8_t)0x88U, (uint8_t)0x29U, (uint8_t)0x55U, (uint8_t)0x5BU,
-    (uint8_t)0x2FU, (uint8_t)0x74U, (uint8_t)0x7CU, (uint8_t)0x93U, (uint8_t)0x26U, (uint8_t)0x65U,
-    (uint8_t)0xCBU, (uint8_t)0x2CU, (uint8_t)0x0FU, (uint8_t)0x1CU, (uint8_t)0xC0U, (uint8_t)0x1BU,
-    (uint8_t)0xD7U, (uint8_t)0x02U, (uint8_t)0x29U, (uint8_t)0x38U, (uint8_t)0x88U, (uint8_t)0x39U,
-    (uint8_t)0xD2U, (uint8_t)0xAFU, (uint8_t)0x05U, (uint8_t)0xE4U, (uint8_t)0x54U, (uint8_t)0x50U,
-    (uint8_t)0x4AU, (uint8_t)0xC7U, (uint8_t)0x8BU, (uint8_t)0x75U, (uint8_t)0x82U, (uint8_t)0x82U,
-    (uint8_t)0x28U, (uint8_t)0x46U, (uint8_t)0xC0U, (uint8_t)0xBAU, (uint8_t)0x35U, (uint8_t)0xC3U,
-    (uint8_t)0x5FU, (uint8_t)0x5CU, (uint8_t)0x59U, (uint8_t)0x16U, (uint8_t)0x0CU, (uint8_t)0xC0U,
-    (uint8_t)0x46U, (uint8_t)0xFDU, (uint8_t)0x82U, (uint8_t)0x51U, (uint8_t)0x54U, (uint8_t)0x1FU,
-    (uint8_t)0xC6U, (uint8_t)0x8CU, (uint8_t)0x9CU, (uint8_t)0x86U, (uint8_t)0xB0U, (uint8_t)0x22U,
-    (uint8_t)0xBBU, (uint8_t)0x70U, (uint8_t)0x99U, (uint8_t)0x87U, (uint8_t)0x6AU, (uint8_t)0x46U,
-    (uint8_t)0x0EU, (uint8_t)0x74U, (uint8_t)0x51U, (uint8_t)0xA8U, (uint8_t)0xA9U, (uint8_t)0x31U,
-    (uint8_t)0x09U, (uint8_t)0x70U, (uint8_t)0x3FU, (uint8_t)0xEEU, (uint8_t)0x1CU, (uint8_t)0x21U,
-    (uint8_t)0x7EU, (uint8_t)0x6CU, (uint8_t)0x38U, (uint8_t)0x26U, (uint8_t)0xE5U, (uint8_t)0x2CU,
-    (uint8_t)0x51U, (uint8_t)0xAAU, (uint8_t)0x69U, (uint8_t)0x1EU, (uint8_t)0x0EU, (uint8_t)0x42U,
-    (uint8_t)0x3CU, (uint8_t)0xFCU, (uint8_t)0x99U, (uint8_t)0xE9U, (uint8_t)0xE3U, (uint8_t)0x16U,
-    (uint8_t)0x50U, (uint8_t)0xC1U, (uint8_t)0x21U, (uint8_t)0x7BU, (uint8_t)0x62U, (uint8_t)0x48U,
-    (uint8_t)0x16U, (uint8_t)0xCDU, (uint8_t)0xADU, (uint8_t)0x9AU, (uint8_t)0x95U, (uint8_t)0xF9U,
-    (uint8_t)0xD5U, (uint8_t)0xB8U, (uint8_t)0x01U, (uint8_t)0x94U, (uint8_t)0x88U, (uint8_t)0xD9U,
-    (uint8_t)0xC0U, (uint8_t)0xA0U, (uint8_t)0xA1U, (uint8_t)0xFEU, (uint8_t)0x30U, (uint8_t)0x75U,
-    (uint8_t)0xA5U, (uint8_t)0x77U, (uint8_t)0xE2U, (uint8_t)0x31U, (uint8_t)0x83U, (uint8_t)0xF8U,
-    (uint8_t)0x1DU, (uint8_t)0x4AU, (uint8_t)0x3FU, (uint8_t)0x2FU, (uint8_t)0xA4U, (uint8_t)0x57U,
-    (uint8_t)0x1EU, (uint8_t)0xFCU, (uint8_t)0x8CU, (uint8_t)0xE0U, (uint8_t)0xBAU, (uint8_t)0x8AU,
-    (uint8_t)0x4FU, (uint8_t)0xE8U, (uint8_t)0xB6U, (uint8_t)0x85U, (uint8_t)0x5DU, (uint8_t)0xFEU,
-    (uint8_t)0x72U, (uint8_t)0xB0U, (uint8_t)0xA6U, (uint8_t)0x6EU, (uint8_t)0xDEU, (uint8_t)0xD2U,
-    (uint8_t)0xFBU, (uint8_t)0xABU, (uint8_t)0xFBU, (uint8_t)0xE5U, (uint8_t)0x8AU, (uint8_t)0x30U,
-    (uint8_t)0xFAU, (uint8_t)0xFAU, (uint8_t)0xBEU, (uint8_t)0x1CU, (uint8_t)0x5DU, (uint8_t)0x71U,
-    (uint8_t)0xA8U, (uint8_t)0x7EU, (uint8_t)0x2FU, (uint8_t)0x74U, (uint8_t)0x1EU, (uint8_t)0xF8U,
-    (uint8_t)0xC1U, (uint8_t)0xFEU, (uint8_t)0x86U, (uint8_t)0xFEU, (uint8_t)0xA6U, (uint8_t)0xBBU,
-    (uint8_t)0xFDU, (uint8_t)0xE5U, (uint8_t)0x30U, (uint8_t)0x67U, (uint8_t)0x7FU, (uint8_t)0x0DU,
-    (uint8_t)0x97U, (uint8_t)0xD1U, (uint8_t)0x1DU, (uint8_t)0x49U, (uint8_t)0xF7U, (uint8_t)0xA8U,
-    (uint8_t)0x44U, (uint8_t)0x3DU, (uint8_t)0x08U, (uint8_t)0x22U, (uint8_t)0xE5U, (uint8_t)0x06U,
-    (uint8_t)0xA9U, (uint8_t)0xF4U, (uint8_t)0x61U, (uint8_t)0x4EU, (uint8_t)0x01U, (uint8_t)0x1EU,
-    (uint8_t)0x2AU, (uint8_t)0x94U, (uint8_t)0x83U, (uint8_t)0x8FU, (uint8_t)0xF8U, (uint8_t)0x8CU,
-    (uint8_t)0xD6U, (uint8_t)0x8CU, (uint8_t)0x8BU, (uint8_t)0xB7U, (uint8_t)0xC5U, (uint8_t)0xC6U,
-    (uint8_t)0x42U, (uint8_t)0x4CU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU,
-    (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU, (uint8_t)0xFFU
+    0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xADU, 0xF8U, 0x54U, 0x58U, 0xA2U,
+    0xBBU, 0x4AU, 0x9AU, 0xAFU, 0xDCU, 0x56U, 0x20U, 0x27U, 0x3DU, 0x3CU, 0xF1U, 0xD8U, 0xB9U,
+    0xC5U, 0x83U, 0xCEU, 0x2DU, 0x36U, 0x95U, 0xA9U, 0xE1U, 0x36U, 0x41U, 0x14U, 0x64U, 0x33U,
+    0xFBU, 0xCCU, 0x93U, 0x9DU, 0xCEU, 0x24U, 0x9BU, 0x3EU, 0xF9U, 0x7DU, 0x2FU, 0xE3U, 0x63U,
+    0x63U, 0x0CU, 0x75U, 0xD8U, 0xF6U, 0x81U, 0xB2U, 0x02U, 0xAEU, 0xC4U, 0x61U, 0x7AU, 0xD3U,
+    0xDFU, 0x1EU, 0xD5U, 0xD5U, 0xFDU, 0x65U, 0x61U, 0x24U, 0x33U, 0xF5U, 0x1FU, 0x5FU, 0x06U,
+    0x6EU, 0xD0U, 0x85U, 0x63U, 0x65U, 0x55U, 0x3DU, 0xEDU, 0x1AU, 0xF3U, 0xB5U, 0x57U, 0x13U,
+    0x5EU, 0x7FU, 0x57U, 0xC9U, 0x35U, 0x98U, 0x4FU, 0x0CU, 0x70U, 0xE0U, 0xE6U, 0x8BU, 0x77U,
+    0xE2U, 0xA6U, 0x89U, 0xDAU, 0xF3U, 0xEFU, 0xE8U, 0x72U, 0x1DU, 0xF1U, 0x58U, 0xA1U, 0x36U,
+    0xADU, 0xE7U, 0x35U, 0x30U, 0xACU, 0xCAU, 0x4FU, 0x48U, 0x3AU, 0x79U, 0x7AU, 0xBCU, 0x0AU,
+    0xB1U, 0x82U, 0xB3U, 0x24U, 0xFBU, 0x61U, 0xD1U, 0x08U, 0xA9U, 0x4BU, 0xB2U, 0xC8U, 0xE3U,
+    0xFBU, 0xB9U, 0x6AU, 0xDAU, 0xB7U, 0x60U, 0xD7U, 0xF4U, 0x68U, 0x1DU, 0x4FU, 0x42U, 0xA3U,
+    0xDEU, 0x39U, 0x4DU, 0xF4U, 0xAEU, 0x56U, 0xEDU, 0xE7U, 0x63U, 0x72U, 0xBBU, 0x19U, 0x0BU,
+    0x07U, 0xA7U, 0xC8U, 0xEEU, 0x0AU, 0x6DU, 0x70U, 0x9EU, 0x02U, 0xFCU, 0xE1U, 0xCDU, 0xF7U,
+    0xE2U, 0xECU, 0xC0U, 0x34U, 0x04U, 0xCDU, 0x28U, 0x34U, 0x2FU, 0x61U, 0x91U, 0x72U, 0xFEU,
+    0x9CU, 0xE9U, 0x85U, 0x83U, 0xFFU, 0x8EU, 0x4FU, 0x12U, 0x32U, 0xEEU, 0xF2U, 0x81U, 0x83U,
+    0xC3U, 0xFEU, 0x3BU, 0x1BU, 0x4CU, 0x6FU, 0xADU, 0x73U, 0x3BU, 0xB5U, 0xFCU, 0xBCU, 0x2EU,
+    0xC2U, 0x20U, 0x05U, 0xC5U, 0x8EU, 0xF1U, 0x83U, 0x7DU, 0x16U, 0x83U, 0xB2U, 0xC6U, 0xF3U,
+    0x4AU, 0x26U, 0xC1U, 0xB2U, 0xEFU, 0xFAU, 0x88U, 0x6BU, 0x42U, 0x38U, 0x61U, 0x1FU, 0xCFU,
+    0xDCU, 0xDEU, 0x35U, 0x5BU, 0x3BU, 0x65U, 0x19U, 0x03U, 0x5BU, 0xBCU, 0x34U, 0xF4U, 0xDEU,
+    0xF9U, 0x9CU, 0x02U, 0x38U, 0x61U, 0xB4U, 0x6FU, 0xC9U, 0xD6U, 0xE6U, 0xC9U, 0x07U, 0x7AU,
+    0xD9U, 0x1DU, 0x26U, 0x91U, 0xF7U, 0xF7U, 0xEEU, 0x59U, 0x8CU, 0xB0U, 0xFAU, 0xC1U, 0x86U,
+    0xD9U, 0x1CU, 0xAEU, 0xFEU, 0x13U, 0x09U, 0x85U, 0x13U, 0x92U, 0x70U, 0xB4U, 0x13U, 0x0CU,
+    0x93U, 0xBCU, 0x43U, 0x79U, 0x44U, 0xF4U, 0xFDU, 0x44U, 0x52U, 0xE2U, 0xD7U, 0x4DU, 0xD3U,
+    0x64U, 0xF2U, 0xE2U, 0x1EU, 0x71U, 0xF5U, 0x4BU, 0xFFU, 0x5CU, 0xAEU, 0x82U, 0xABU, 0x9CU,
+    0x9DU, 0xF6U, 0x9EU, 0xE8U, 0x6DU, 0x2BU, 0xC5U, 0x22U, 0x36U, 0x3AU, 0x0DU, 0xABU, 0xC5U,
+    0x21U, 0x97U, 0x9BU, 0x0DU, 0xEAU, 0xDAU, 0x1DU, 0xBFU, 0x9AU, 0x42U, 0xD5U, 0xC4U, 0x48U,
+    0x4EU, 0x0AU, 0xBCU, 0xD0U, 0x6BU, 0xFAU, 0x53U, 0xDDU, 0xEFU, 0x3CU, 0x1BU, 0x20U, 0xEEU,
+    0x3FU, 0xD5U, 0x9DU, 0x7CU, 0x25U, 0xE4U, 0x1DU, 0x2BU, 0x66U, 0x9EU, 0x1EU, 0xF1U, 0x6EU,
+    0x6FU, 0x52U, 0xC3U, 0x16U, 0x4DU, 0xF4U, 0xFBU, 0x79U, 0x30U, 0xE9U, 0xE4U, 0xE5U, 0x88U,
+    0x57U, 0xB6U, 0xACU, 0x7DU, 0x5FU, 0x42U, 0xD6U, 0x9FU, 0x6DU, 0x18U, 0x77U, 0x63U, 0xCFU,
+    0x1DU, 0x55U, 0x03U, 0x40U, 0x04U, 0x87U, 0xF5U, 0x5BU, 0xA5U, 0x7EU, 0x31U, 0xCCU, 0x7AU,
+    0x71U, 0x35U, 0xC8U, 0x86U, 0xEFU, 0xB4U, 0x31U, 0x8AU, 0xEDU, 0x6AU, 0x1EU, 0x01U, 0x2DU,
+    0x9EU, 0x68U, 0x32U, 0xA9U, 0x07U, 0x60U, 0x0AU, 0x91U, 0x81U, 0x30U, 0xC4U, 0x6DU, 0xC7U,
+    0x78U, 0xF9U, 0x71U, 0xADU, 0x00U, 0x38U, 0x09U, 0x29U, 0x99U, 0xA3U, 0x33U, 0xCBU, 0x8BU,
+    0x7AU, 0x1AU, 0x1DU, 0xB9U, 0x3DU, 0x71U, 0x40U, 0x00U, 0x3CU, 0x2AU, 0x4EU, 0xCEU, 0xA9U,
+    0xF9U, 0x8DU, 0x0AU, 0xCCU, 0x0AU, 0x82U, 0x91U, 0xCDU, 0xCEU, 0xC9U, 0x7DU, 0xCFU, 0x8EU,
+    0xC9U, 0xB5U, 0x5AU, 0x7FU, 0x88U, 0xA4U, 0x6BU, 0x4DU, 0xB5U, 0xA8U, 0x51U, 0xF4U, 0x41U,
+    0x82U, 0xE1U, 0xC6U, 0x8AU, 0x00U, 0x7EU, 0x5EU, 0x0DU, 0xD9U, 0x02U, 0x0BU, 0xFDU, 0x64U,
+    0xB6U, 0x45U, 0x03U, 0x6CU, 0x7AU, 0x4EU, 0x67U, 0x7DU, 0x2CU, 0x38U, 0x53U, 0x2AU, 0x3AU,
+    0x23U, 0xBAU, 0x44U, 0x42U, 0xCAU, 0xF5U, 0x3EU, 0xA6U, 0x3BU, 0xB4U, 0x54U, 0x32U, 0x9BU,
+    0x76U, 0x24U, 0xC8U, 0x91U, 0x7BU, 0xDDU, 0x64U, 0xB1U, 0xC0U, 0xFDU, 0x4CU, 0xB3U, 0x8EU,
+    0x8CU, 0x33U, 0x4CU, 0x70U, 0x1CU, 0x3AU, 0xCDU, 0xADU, 0x06U, 0x57U, 0xFCU, 0xCFU, 0xECU,
+    0x71U, 0x9BU, 0x1FU, 0x5CU, 0x3EU, 0x4EU, 0x46U, 0x04U, 0x1FU, 0x38U, 0x81U, 0x47U, 0xFBU,
+    0x4CU, 0xFDU, 0xB4U, 0x77U, 0xA5U, 0x24U, 0x71U, 0xF7U, 0xA9U, 0xA9U, 0x69U, 0x10U, 0xB8U,
+    0x55U, 0x32U, 0x2EU, 0xDBU, 0x63U, 0x40U, 0xD8U, 0xA0U, 0x0EU, 0xF0U, 0x92U, 0x35U, 0x05U,
+    0x11U, 0xE3U, 0x0AU, 0xBEU, 0xC1U, 0xFFU, 0xF9U, 0xE3U, 0xA2U, 0x6EU, 0x7FU, 0xB2U, 0x9FU,
+    0x8CU, 0x18U, 0x30U, 0x23U, 0xC3U, 0x58U, 0x7EU, 0x38U, 0xDAU, 0x00U, 0x77U, 0xD9U, 0xB4U,
+    0x76U, 0x3EU, 0x4EU, 0x4BU, 0x94U, 0xB2U, 0xBBU, 0xC1U, 0x94U, 0xC6U, 0x65U, 0x1EU, 0x77U,
+    0xCAU, 0xF9U, 0x92U, 0xEEU, 0xAAU, 0xC0U, 0x23U, 0x2AU, 0x28U, 0x1BU, 0xF6U, 0xB3U, 0xA7U,
+    0x39U, 0xC1U, 0x22U, 0x61U, 0x16U, 0x82U, 0x0AU, 0xE8U, 0xDBU, 0x58U, 0x47U, 0xA6U, 0x7CU,
+    0xBEU, 0xF9U, 0xC9U, 0x09U, 0x1BU, 0x46U, 0x2DU, 0x53U, 0x8CU, 0xD7U, 0x2BU, 0x03U, 0x74U,
+    0x6AU, 0xE7U, 0x7FU, 0x5EU, 0x62U, 0x29U, 0x2CU, 0x31U, 0x15U, 0x62U, 0xA8U, 0x46U, 0x50U,
+    0x5DU, 0xC8U, 0x2DU, 0xB8U, 0x54U, 0x33U, 0x8AU, 0xE4U, 0x9FU, 0x52U, 0x35U, 0xC9U, 0x5BU,
+    0x91U, 0x17U, 0x8CU, 0xCFU, 0x2DU, 0xD5U, 0xCAU, 0xCEU, 0xF4U, 0x03U, 0xECU, 0x9DU, 0x18U,
+    0x10U, 0xC6U, 0x27U, 0x2BU, 0x04U, 0x5BU, 0x3BU, 0x71U, 0xF9U, 0xDCU, 0x6BU, 0x80U, 0xD6U,
+    0x3FU, 0xDDU, 0x4AU, 0x8EU, 0x9AU, 0xDBU, 0x1EU, 0x69U, 0x62U, 0xA6U, 0x95U, 0x26U, 0xD4U,
+    0x31U, 0x61U, 0xC1U, 0xA4U, 0x1DU, 0x57U, 0x0DU, 0x79U, 0x38U, 0xDAU, 0xD4U, 0xA4U, 0x0EU,
+    0x32U, 0x9CU, 0xCFU, 0xF4U, 0x6AU, 0xAAU, 0x36U, 0xADU, 0x00U, 0x4CU, 0xF6U, 0x00U, 0xC8U,
+    0x38U, 0x1EU, 0x42U, 0x5AU, 0x31U, 0xD9U, 0x51U, 0xAEU, 0x64U, 0xFDU, 0xB2U, 0x3FU, 0xCEU,
+    0xC9U, 0x50U, 0x9DU, 0x43U, 0x68U, 0x7FU, 0xEBU, 0x69U, 0xEDU, 0xD1U, 0xCCU, 0x5EU, 0x0BU,
+    0x8CU, 0xC3U, 0xBDU, 0xF6U, 0x4BU, 0x10U, 0xEFU, 0x86U, 0xB6U, 0x31U, 0x42U, 0xA3U, 0xABU,
+    0x88U, 0x29U, 0x55U, 0x5BU, 0x2FU, 0x74U, 0x7CU, 0x93U, 0x26U, 0x65U, 0xCBU, 0x2CU, 0x0FU,
+    0x1CU, 0xC0U, 0x1BU, 0xD7U, 0x02U, 0x29U, 0x38U, 0x88U, 0x39U, 0xD2U, 0xAFU, 0x05U, 0xE4U,
+    0x54U, 0x50U, 0x4AU, 0xC7U, 0x8BU, 0x75U, 0x82U, 0x82U, 0x28U, 0x46U, 0xC0U, 0xBAU, 0x35U,
+    0xC3U, 0x5FU, 0x5CU, 0x59U, 0x16U, 0x0CU, 0xC0U, 0x46U, 0xFDU, 0x82U, 0x51U, 0x54U, 0x1FU,
+    0xC6U, 0x8CU, 0x9CU, 0x86U, 0xB0U, 0x22U, 0xBBU, 0x70U, 0x99U, 0x87U, 0x6AU, 0x46U, 0x0EU,
+    0x74U, 0x51U, 0xA8U, 0xA9U, 0x31U, 0x09U, 0x70U, 0x3FU, 0xEEU, 0x1CU, 0x21U, 0x7EU, 0x6CU,
+    0x38U, 0x26U, 0xE5U, 0x2CU, 0x51U, 0xAAU, 0x69U, 0x1EU, 0x0EU, 0x42U, 0x3CU, 0xFCU, 0x99U,
+    0xE9U, 0xE3U, 0x16U, 0x50U, 0xC1U, 0x21U, 0x7BU, 0x62U, 0x48U, 0x16U, 0xCDU, 0xADU, 0x9AU,
+    0x95U, 0xF9U, 0xD5U, 0xB8U, 0x01U, 0x94U, 0x88U, 0xD9U, 0xC0U, 0xA0U, 0xA1U, 0xFEU, 0x30U,
+    0x75U, 0xA5U, 0x77U, 0xE2U, 0x31U, 0x83U, 0xF8U, 0x1DU, 0x4AU, 0x3FU, 0x2FU, 0xA4U, 0x57U,
+    0x1EU, 0xFCU, 0x8CU, 0xE0U, 0xBAU, 0x8AU, 0x4FU, 0xE8U, 0xB6U, 0x85U, 0x5DU, 0xFEU, 0x72U,
+    0xB0U, 0xA6U, 0x6EU, 0xDEU, 0xD2U, 0xFBU, 0xABU, 0xFBU, 0xE5U, 0x8AU, 0x30U, 0xFAU, 0xFAU,
+    0xBEU, 0x1CU, 0x5DU, 0x71U, 0xA8U, 0x7EU, 0x2FU, 0x74U, 0x1EU, 0xF8U, 0xC1U, 0xFEU, 0x86U,
+    0xFEU, 0xA6U, 0xBBU, 0xFDU, 0xE5U, 0x30U, 0x67U, 0x7FU, 0x0DU, 0x97U, 0xD1U, 0x1DU, 0x49U,
+    0xF7U, 0xA8U, 0x44U, 0x3DU, 0x08U, 0x22U, 0xE5U, 0x06U, 0xA9U, 0xF4U, 0x61U, 0x4EU, 0x01U,
+    0x1EU, 0x2AU, 0x94U, 0x83U, 0x8FU, 0xF8U, 0x8CU, 0xD6U, 0x8CU, 0x8BU, 0xB7U, 0xC5U, 0xC6U,
+    0x42U, 0x4CU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU
   };
 
 #if defined(__cplusplus)
diff --git a/include/msvc/internal/Hacl_K256_PrecompTable.h b/include/msvc/internal/Hacl_K256_PrecompTable.h
index 26bdfa1f..ff15f1c9 100644
--- a/include/msvc/internal/Hacl_K256_PrecompTable.h
+++ b/include/msvc/internal/Hacl_K256_PrecompTable.h
@@ -39,498 +39,378 @@ static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_basepoint_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)705178180786072U,
-    (uint64_t)3855836460717471U, (uint64_t)4089131105950716U, (uint64_t)3301581525494108U,
-    (uint64_t)133858670344668U, (uint64_t)2199641648059576U, (uint64_t)1278080618437060U,
-    (uint64_t)3959378566518708U, (uint64_t)3455034269351872U, (uint64_t)79417610544803U,
-    (uint64_t)1U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)1282049064345544U, (uint64_t)971732600440099U, (uint64_t)1014594595727339U,
-    (uint64_t)4392159187541980U, (uint64_t)268327875692285U, (uint64_t)2411661712280539U,
-    (uint64_t)1092576199280126U, (uint64_t)4328619610718051U, (uint64_t)3535440816471627U,
-    (uint64_t)95182251488556U, (uint64_t)1893725512243753U, (uint64_t)3619861457111820U,
-    (uint64_t)879374960417905U, (uint64_t)2868056058129113U, (uint64_t)273195291893682U,
-    (uint64_t)2044797305960112U, (uint64_t)2357106853933780U, (uint64_t)3563112438336058U,
-    (uint64_t)2430811541762558U, (uint64_t)106443809495428U, (uint64_t)2231357633909668U,
-    (uint64_t)3641705835951936U, (uint64_t)80642569314189U, (uint64_t)2254841882373268U,
-    (uint64_t)149848031966573U, (uint64_t)2304615661367764U, (uint64_t)2410957403736446U,
-    (uint64_t)2712754805859804U, (uint64_t)2440183877540536U, (uint64_t)99784623895865U,
-    (uint64_t)3667773127482758U, (uint64_t)1354899394473308U, (uint64_t)3636602998800808U,
-    (uint64_t)2709296679846364U, (uint64_t)7253362091963U, (uint64_t)3585950735562744U,
-    (uint64_t)935775991758415U, (uint64_t)4108078106735201U, (uint64_t)556081800336307U,
-    (uint64_t)229585977163057U, (uint64_t)4055594186679801U, (uint64_t)1767681004944933U,
-    (uint64_t)1432634922083242U, (uint64_t)534935602949197U, (uint64_t)251753159522567U,
-    (uint64_t)2846474078499321U, (uint64_t)4488649590348702U, (uint64_t)2437476916025038U,
-    (uint64_t)3040577412822874U, (uint64_t)79405234918614U, (uint64_t)3030621226551508U,
-    (uint64_t)2801117003929806U, (uint64_t)1642927515498422U, (uint64_t)2802725079726297U,
-    (uint64_t)8472780626107U, (uint64_t)866068070352655U, (uint64_t)188080768545106U,
-    (uint64_t)2152119998903058U, (uint64_t)3391239985029665U, (uint64_t)23820026013564U,
-    (uint64_t)2965064154891949U, (uint64_t)1846516097921398U, (uint64_t)4418379948133146U,
-    (uint64_t)3137755426942400U, (uint64_t)47705291301781U, (uint64_t)4278533051105665U,
-    (uint64_t)3453643211214931U, (uint64_t)3379734319145156U, (uint64_t)3762442192097039U,
-    (uint64_t)40243003528694U, (uint64_t)4063448994211201U, (uint64_t)5697015368785U,
-    (uint64_t)1006545411838613U, (uint64_t)4242291693755210U, (uint64_t)135184629190512U,
-    (uint64_t)264898689131035U, (uint64_t)611796474823597U, (uint64_t)3255382250029089U,
-    (uint64_t)3490429246984696U, (uint64_t)236558595864362U, (uint64_t)2055934691551704U,
-    (uint64_t)1487711670114502U, (uint64_t)1823930698221632U, (uint64_t)2130937287438472U,
-    (uint64_t)154610053389779U, (uint64_t)2746573287023216U, (uint64_t)2430987262221221U,
-    (uint64_t)1668741642878689U, (uint64_t)904982541243977U, (uint64_t)56087343124948U,
-    (uint64_t)393905062353536U, (uint64_t)412681877350188U, (uint64_t)3153602040979977U,
-    (uint64_t)4466820876224989U, (uint64_t)146579165617857U, (uint64_t)2628741216508991U,
-    (uint64_t)747994231529806U, (uint64_t)750506569317681U, (uint64_t)1887492790748779U,
-    (uint64_t)35259008682771U, (uint64_t)2085116434894208U, (uint64_t)543291398921711U,
-    (uint64_t)1144362007901552U, (uint64_t)679305136036846U, (uint64_t)141090902244489U,
-    (uint64_t)632480954474859U, (uint64_t)2384513102652591U, (uint64_t)2225529790159790U,
-    (uint64_t)692258664851625U, (uint64_t)198681843567699U, (uint64_t)2397092587228181U,
-    (uint64_t)145862822166614U, (uint64_t)196976540479452U, (uint64_t)3321831130141455U,
-    (uint64_t)69266673089832U, (uint64_t)4469644227342284U, (uint64_t)3899271145504796U,
-    (uint64_t)1261890974076660U, (uint64_t)525357673886694U, (uint64_t)182135997828583U,
-    (uint64_t)4292760618810332U, (uint64_t)3404186545541683U, (uint64_t)312297386688768U,
-    (uint64_t)204377466824608U, (uint64_t)230900767857952U, (uint64_t)3871485172339693U,
-    (uint64_t)779449329662955U, (uint64_t)978655822464694U, (uint64_t)2278252139594027U,
-    (uint64_t)104641527040382U, (uint64_t)3528840153625765U, (uint64_t)4484699080275273U,
-    (uint64_t)1463971951102316U, (uint64_t)4013910812844749U, (uint64_t)228915589433620U,
-    (uint64_t)1209641433482461U, (uint64_t)4043178788774759U, (uint64_t)3008668238856634U,
-    (uint64_t)1448425089071412U, (uint64_t)26269719725037U, (uint64_t)3330785027545223U,
-    (uint64_t)852657975349259U, (uint64_t)227245054466105U, (uint64_t)1534632353984777U,
-    (uint64_t)207715098574660U, (uint64_t)3209837527352280U, (uint64_t)4051688046309066U,
-    (uint64_t)3839009590725955U, (uint64_t)1321506437398842U, (uint64_t)68340219159928U,
-    (uint64_t)1806950276956275U, (uint64_t)3923908055275295U, (uint64_t)743963253393575U,
-    (uint64_t)42162407478783U, (uint64_t)261334584474610U, (uint64_t)3728224928885214U,
-    (uint64_t)4004701081842869U, (uint64_t)709043201644674U, (uint64_t)4267294249150171U,
-    (uint64_t)255540582975025U, (uint64_t)875490593722211U, (uint64_t)796393708218375U,
-    (uint64_t)14774425627956U, (uint64_t)1500040516752097U, (uint64_t)141076627721678U,
-    (uint64_t)2634539368480628U, (uint64_t)1106488853550103U, (uint64_t)2346231921151930U,
-    (uint64_t)897108283954283U, (uint64_t)64616679559843U, (uint64_t)400244949840943U,
-    (uint64_t)1731263826831733U, (uint64_t)1649996579904651U, (uint64_t)3643693449640761U,
-    (uint64_t)172543068638991U, (uint64_t)329537981097182U, (uint64_t)2029799860802869U,
-    (uint64_t)4377737515208862U, (uint64_t)29103311051334U, (uint64_t)265583594111499U,
-    (uint64_t)3798074876561255U, (uint64_t)184749333259352U, (uint64_t)3117395073661801U,
-    (uint64_t)3695784565008833U, (uint64_t)64282709896721U, (uint64_t)1618968913246422U,
-    (uint64_t)3185235128095257U, (uint64_t)3288745068118692U, (uint64_t)1963818603508782U,
-    (uint64_t)281054350739495U, (uint64_t)1658639050810346U, (uint64_t)3061097601679552U,
-    (uint64_t)3023781433263746U, (uint64_t)2770283391242475U, (uint64_t)144508864751908U,
-    (uint64_t)173576288079856U, (uint64_t)46114579547054U, (uint64_t)1679480127300211U,
-    (uint64_t)1683062051644007U, (uint64_t)117183826129323U, (uint64_t)1894068608117440U,
-    (uint64_t)3846899838975733U, (uint64_t)4289279019496192U, (uint64_t)176995887914031U,
-    (uint64_t)78074942938713U, (uint64_t)454207263265292U, (uint64_t)972683614054061U,
-    (uint64_t)808474205144361U, (uint64_t)942703935951735U, (uint64_t)134460241077887U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    705178180786072ULL, 3855836460717471ULL, 4089131105950716ULL, 3301581525494108ULL,
+    133858670344668ULL, 2199641648059576ULL, 1278080618437060ULL, 3959378566518708ULL,
+    3455034269351872ULL, 79417610544803ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1282049064345544ULL,
+    971732600440099ULL, 1014594595727339ULL, 4392159187541980ULL, 268327875692285ULL,
+    2411661712280539ULL, 1092576199280126ULL, 4328619610718051ULL, 3535440816471627ULL,
+    95182251488556ULL, 1893725512243753ULL, 3619861457111820ULL, 879374960417905ULL,
+    2868056058129113ULL, 273195291893682ULL, 2044797305960112ULL, 2357106853933780ULL,
+    3563112438336058ULL, 2430811541762558ULL, 106443809495428ULL, 2231357633909668ULL,
+    3641705835951936ULL, 80642569314189ULL, 2254841882373268ULL, 149848031966573ULL,
+    2304615661367764ULL, 2410957403736446ULL, 2712754805859804ULL, 2440183877540536ULL,
+    99784623895865ULL, 3667773127482758ULL, 1354899394473308ULL, 3636602998800808ULL,
+    2709296679846364ULL, 7253362091963ULL, 3585950735562744ULL, 935775991758415ULL,
+    4108078106735201ULL, 556081800336307ULL, 229585977163057ULL, 4055594186679801ULL,
+    1767681004944933ULL, 1432634922083242ULL, 534935602949197ULL, 251753159522567ULL,
+    2846474078499321ULL, 4488649590348702ULL, 2437476916025038ULL, 3040577412822874ULL,
+    79405234918614ULL, 3030621226551508ULL, 2801117003929806ULL, 1642927515498422ULL,
+    2802725079726297ULL, 8472780626107ULL, 866068070352655ULL, 188080768545106ULL,
+    2152119998903058ULL, 3391239985029665ULL, 23820026013564ULL, 2965064154891949ULL,
+    1846516097921398ULL, 4418379948133146ULL, 3137755426942400ULL, 47705291301781ULL,
+    4278533051105665ULL, 3453643211214931ULL, 3379734319145156ULL, 3762442192097039ULL,
+    40243003528694ULL, 4063448994211201ULL, 5697015368785ULL, 1006545411838613ULL,
+    4242291693755210ULL, 135184629190512ULL, 264898689131035ULL, 611796474823597ULL,
+    3255382250029089ULL, 3490429246984696ULL, 236558595864362ULL, 2055934691551704ULL,
+    1487711670114502ULL, 1823930698221632ULL, 2130937287438472ULL, 154610053389779ULL,
+    2746573287023216ULL, 2430987262221221ULL, 1668741642878689ULL, 904982541243977ULL,
+    56087343124948ULL, 393905062353536ULL, 412681877350188ULL, 3153602040979977ULL,
+    4466820876224989ULL, 146579165617857ULL, 2628741216508991ULL, 747994231529806ULL,
+    750506569317681ULL, 1887492790748779ULL, 35259008682771ULL, 2085116434894208ULL,
+    543291398921711ULL, 1144362007901552ULL, 679305136036846ULL, 141090902244489ULL,
+    632480954474859ULL, 2384513102652591ULL, 2225529790159790ULL, 692258664851625ULL,
+    198681843567699ULL, 2397092587228181ULL, 145862822166614ULL, 196976540479452ULL,
+    3321831130141455ULL, 69266673089832ULL, 4469644227342284ULL, 3899271145504796ULL,
+    1261890974076660ULL, 525357673886694ULL, 182135997828583ULL, 4292760618810332ULL,
+    3404186545541683ULL, 312297386688768ULL, 204377466824608ULL, 230900767857952ULL,
+    3871485172339693ULL, 779449329662955ULL, 978655822464694ULL, 2278252139594027ULL,
+    104641527040382ULL, 3528840153625765ULL, 4484699080275273ULL, 1463971951102316ULL,
+    4013910812844749ULL, 228915589433620ULL, 1209641433482461ULL, 4043178788774759ULL,
+    3008668238856634ULL, 1448425089071412ULL, 26269719725037ULL, 3330785027545223ULL,
+    852657975349259ULL, 227245054466105ULL, 1534632353984777ULL, 207715098574660ULL,
+    3209837527352280ULL, 4051688046309066ULL, 3839009590725955ULL, 1321506437398842ULL,
+    68340219159928ULL, 1806950276956275ULL, 3923908055275295ULL, 743963253393575ULL,
+    42162407478783ULL, 261334584474610ULL, 3728224928885214ULL, 4004701081842869ULL,
+    709043201644674ULL, 4267294249150171ULL, 255540582975025ULL, 875490593722211ULL,
+    796393708218375ULL, 14774425627956ULL, 1500040516752097ULL, 141076627721678ULL,
+    2634539368480628ULL, 1106488853550103ULL, 2346231921151930ULL, 897108283954283ULL,
+    64616679559843ULL, 400244949840943ULL, 1731263826831733ULL, 1649996579904651ULL,
+    3643693449640761ULL, 172543068638991ULL, 329537981097182ULL, 2029799860802869ULL,
+    4377737515208862ULL, 29103311051334ULL, 265583594111499ULL, 3798074876561255ULL,
+    184749333259352ULL, 3117395073661801ULL, 3695784565008833ULL, 64282709896721ULL,
+    1618968913246422ULL, 3185235128095257ULL, 3288745068118692ULL, 1963818603508782ULL,
+    281054350739495ULL, 1658639050810346ULL, 3061097601679552ULL, 3023781433263746ULL,
+    2770283391242475ULL, 144508864751908ULL, 173576288079856ULL, 46114579547054ULL,
+    1679480127300211ULL, 1683062051644007ULL, 117183826129323ULL, 1894068608117440ULL,
+    3846899838975733ULL, 4289279019496192ULL, 176995887914031ULL, 78074942938713ULL,
+    454207263265292ULL, 972683614054061ULL, 808474205144361ULL, 942703935951735ULL,
+    134460241077887ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_g_pow2_64_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)4496295042185355U,
-    (uint64_t)3125448202219451U, (uint64_t)1239608518490046U, (uint64_t)2687445637493112U,
-    (uint64_t)77979604880139U, (uint64_t)3360310474215011U, (uint64_t)1216410458165163U,
-    (uint64_t)177901593587973U, (uint64_t)3209978938104985U, (uint64_t)118285133003718U,
-    (uint64_t)434519962075150U, (uint64_t)1114612377498854U, (uint64_t)3488596944003813U,
-    (uint64_t)450716531072892U, (uint64_t)66044973203836U, (uint64_t)2822827191156652U,
-    (uint64_t)2417714248626059U, (uint64_t)2173117567943U, (uint64_t)961513119252459U,
-    (uint64_t)233852556538333U, (uint64_t)3014783730323962U, (uint64_t)2955192634004574U,
-    (uint64_t)580546524951282U, (uint64_t)2982973948711252U, (uint64_t)226295722018730U,
-    (uint64_t)26457116218543U, (uint64_t)3401523493637663U, (uint64_t)2597746825024790U,
-    (uint64_t)1789211180483113U, (uint64_t)155862365823427U, (uint64_t)4056806876632134U,
-    (uint64_t)1742291745730568U, (uint64_t)3527759000626890U, (uint64_t)3740578471192596U,
-    (uint64_t)177295097700537U, (uint64_t)1533961415657770U, (uint64_t)4305228982382487U,
-    (uint64_t)4069090871282711U, (uint64_t)4090877481646667U, (uint64_t)220939617041498U,
-    (uint64_t)2057548127959588U, (uint64_t)45185623103252U, (uint64_t)2871963270423449U,
-    (uint64_t)3312974792248749U, (uint64_t)8710601879528U, (uint64_t)570612225194540U,
-    (uint64_t)2045632925323972U, (uint64_t)1263913878297555U, (uint64_t)1294592284757719U,
-    (uint64_t)238067747295054U, (uint64_t)1576659948829386U, (uint64_t)2315159636629917U,
-    (uint64_t)3624867787891655U, (uint64_t)647628266663887U, (uint64_t)75788399640253U,
-    (uint64_t)710811707847797U, (uint64_t)130020650130128U, (uint64_t)1975045425972589U,
-    (uint64_t)136351545314094U, (uint64_t)229292031212337U, (uint64_t)1061471455264148U,
-    (uint64_t)3281312694184822U, (uint64_t)1692442293921797U, (uint64_t)4171008525509513U,
-    (uint64_t)275424696197549U, (uint64_t)1170296303921965U, (uint64_t)4154092952807735U,
-    (uint64_t)4371262070870741U, (uint64_t)835769811036496U, (uint64_t)275812646528189U,
-    (uint64_t)4006745785521764U, (uint64_t)1965172239781114U, (uint64_t)4121055644916429U,
-    (uint64_t)3578995380229569U, (uint64_t)169798870760022U, (uint64_t)1834234783016431U,
-    (uint64_t)3186919121688538U, (uint64_t)1894269993170652U, (uint64_t)868603832348691U,
-    (uint64_t)110978471368876U, (uint64_t)1659296605881532U, (uint64_t)3257830829309297U,
-    (uint64_t)3381509832701119U, (uint64_t)4016163121121296U, (uint64_t)265240263496294U,
-    (uint64_t)4411285343933251U, (uint64_t)728746770806400U, (uint64_t)1767819098558739U,
-    (uint64_t)3002081480892841U, (uint64_t)96312133241935U, (uint64_t)468184501392107U,
-    (uint64_t)2061529496271208U, (uint64_t)801565111628867U, (uint64_t)3380678576799273U,
-    (uint64_t)121814978170941U, (uint64_t)3340363319165433U, (uint64_t)2764604325746928U,
-    (uint64_t)4475755976431968U, (uint64_t)3678073419927081U, (uint64_t)237001357924061U,
-    (uint64_t)4110487014553450U, (uint64_t)442517757833404U, (uint64_t)3976758767423859U,
-    (uint64_t)2559863799262476U, (uint64_t)178144664279213U, (uint64_t)2488702171798051U,
-    (uint64_t)4292079598620208U, (uint64_t)1642918280217329U, (uint64_t)3694920319798108U,
-    (uint64_t)111735528281657U, (uint64_t)2904433967156033U, (uint64_t)4391518032143166U,
-    (uint64_t)3018885875516259U, (uint64_t)3730342681447122U, (uint64_t)10320273322750U,
-    (uint64_t)555845881555519U, (uint64_t)58355404017985U, (uint64_t)379009359053696U,
-    (uint64_t)450317203955503U, (uint64_t)271063299686173U, (uint64_t)910340241794202U,
-    (uint64_t)4145234574853890U, (uint64_t)2059755654702755U, (uint64_t)626530377112246U,
-    (uint64_t)188918989156857U, (uint64_t)3316657461542117U, (uint64_t)778033563170765U,
-    (uint64_t)3568562306532187U, (uint64_t)2888619469733481U, (uint64_t)4364919962337U,
-    (uint64_t)4095057288587059U, (uint64_t)2275461355379988U, (uint64_t)1507422995910897U,
-    (uint64_t)3737691697116252U, (uint64_t)28779913258578U, (uint64_t)131453301647952U,
-    (uint64_t)3613515597508469U, (uint64_t)2389606941441321U, (uint64_t)2135459302594806U,
-    (uint64_t)105517262484263U, (uint64_t)2973432939331401U, (uint64_t)3447096622477885U,
-    (uint64_t)684654106536844U, (uint64_t)2815198316729695U, (uint64_t)280303067216071U,
-    (uint64_t)1841014812927024U, (uint64_t)1181026273060917U, (uint64_t)4092989148457730U,
-    (uint64_t)1381045116206278U, (uint64_t)112475725893965U, (uint64_t)2309144740156686U,
-    (uint64_t)1558825847609352U, (uint64_t)2008068002046292U, (uint64_t)3153511625856423U,
-    (uint64_t)38469701427673U, (uint64_t)4240572315518056U, (uint64_t)2295170987320580U,
-    (uint64_t)187734093837094U, (uint64_t)301041528077172U, (uint64_t)234553141005715U,
-    (uint64_t)4170513699279606U, (uint64_t)1600132848196146U, (uint64_t)3149113064155689U,
-    (uint64_t)2733255352600949U, (uint64_t)144915931419495U, (uint64_t)1221012073888926U,
-    (uint64_t)4395668111081710U, (uint64_t)2464799161496070U, (uint64_t)3664256125241313U,
-    (uint64_t)239705368981290U, (uint64_t)1415181408539490U, (uint64_t)2551836620449074U,
-    (uint64_t)3003106895689578U, (uint64_t)968947218886924U, (uint64_t)270781532362673U,
-    (uint64_t)2905980714350372U, (uint64_t)3246927349288975U, (uint64_t)2653377642686974U,
-    (uint64_t)1577457093418263U, (uint64_t)279488238785848U, (uint64_t)568335962564552U,
-    (uint64_t)4251365041645758U, (uint64_t)1257832559776007U, (uint64_t)2424022444243863U,
-    (uint64_t)261166122046343U, (uint64_t)4399874608082116U, (uint64_t)640509987891568U,
-    (uint64_t)3119706885332220U, (uint64_t)1990185416694007U, (uint64_t)119390098529341U,
-    (uint64_t)220106534694050U, (uint64_t)937225880034895U, (uint64_t)656288151358882U,
-    (uint64_t)1766967254772100U, (uint64_t)197900790969750U, (uint64_t)2992539221608875U,
-    (uint64_t)3960297171111858U, (uint64_t)3499202002925081U, (uint64_t)1103060980924705U,
-    (uint64_t)13670895919578U, (uint64_t)430132744187721U, (uint64_t)1206771838050953U,
-    (uint64_t)2474749300167198U, (uint64_t)296299539510780U, (uint64_t)61565517686436U,
-    (uint64_t)752778559080573U, (uint64_t)3049015829565410U, (uint64_t)3538647632527371U,
-    (uint64_t)1640473028662032U, (uint64_t)182488721849306U, (uint64_t)1234378482161516U,
-    (uint64_t)3736205988606381U, (uint64_t)2814216844344487U, (uint64_t)3877249891529557U,
-    (uint64_t)51681412928433U, (uint64_t)4275336620301239U, (uint64_t)3084074032750651U,
-    (uint64_t)42732308350456U, (uint64_t)3648603591552229U, (uint64_t)142450621701603U,
-    (uint64_t)4020045475009854U, (uint64_t)1050293952073054U, (uint64_t)1974773673079851U,
-    (uint64_t)1815515638724020U, (uint64_t)104845375825434U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    4496295042185355ULL, 3125448202219451ULL, 1239608518490046ULL, 2687445637493112ULL,
+    77979604880139ULL, 3360310474215011ULL, 1216410458165163ULL, 177901593587973ULL,
+    3209978938104985ULL, 118285133003718ULL, 434519962075150ULL, 1114612377498854ULL,
+    3488596944003813ULL, 450716531072892ULL, 66044973203836ULL, 2822827191156652ULL,
+    2417714248626059ULL, 2173117567943ULL, 961513119252459ULL, 233852556538333ULL,
+    3014783730323962ULL, 2955192634004574ULL, 580546524951282ULL, 2982973948711252ULL,
+    226295722018730ULL, 26457116218543ULL, 3401523493637663ULL, 2597746825024790ULL,
+    1789211180483113ULL, 155862365823427ULL, 4056806876632134ULL, 1742291745730568ULL,
+    3527759000626890ULL, 3740578471192596ULL, 177295097700537ULL, 1533961415657770ULL,
+    4305228982382487ULL, 4069090871282711ULL, 4090877481646667ULL, 220939617041498ULL,
+    2057548127959588ULL, 45185623103252ULL, 2871963270423449ULL, 3312974792248749ULL,
+    8710601879528ULL, 570612225194540ULL, 2045632925323972ULL, 1263913878297555ULL,
+    1294592284757719ULL, 238067747295054ULL, 1576659948829386ULL, 2315159636629917ULL,
+    3624867787891655ULL, 647628266663887ULL, 75788399640253ULL, 710811707847797ULL,
+    130020650130128ULL, 1975045425972589ULL, 136351545314094ULL, 229292031212337ULL,
+    1061471455264148ULL, 3281312694184822ULL, 1692442293921797ULL, 4171008525509513ULL,
+    275424696197549ULL, 1170296303921965ULL, 4154092952807735ULL, 4371262070870741ULL,
+    835769811036496ULL, 275812646528189ULL, 4006745785521764ULL, 1965172239781114ULL,
+    4121055644916429ULL, 3578995380229569ULL, 169798870760022ULL, 1834234783016431ULL,
+    3186919121688538ULL, 1894269993170652ULL, 868603832348691ULL, 110978471368876ULL,
+    1659296605881532ULL, 3257830829309297ULL, 3381509832701119ULL, 4016163121121296ULL,
+    265240263496294ULL, 4411285343933251ULL, 728746770806400ULL, 1767819098558739ULL,
+    3002081480892841ULL, 96312133241935ULL, 468184501392107ULL, 2061529496271208ULL,
+    801565111628867ULL, 3380678576799273ULL, 121814978170941ULL, 3340363319165433ULL,
+    2764604325746928ULL, 4475755976431968ULL, 3678073419927081ULL, 237001357924061ULL,
+    4110487014553450ULL, 442517757833404ULL, 3976758767423859ULL, 2559863799262476ULL,
+    178144664279213ULL, 2488702171798051ULL, 4292079598620208ULL, 1642918280217329ULL,
+    3694920319798108ULL, 111735528281657ULL, 2904433967156033ULL, 4391518032143166ULL,
+    3018885875516259ULL, 3730342681447122ULL, 10320273322750ULL, 555845881555519ULL,
+    58355404017985ULL, 379009359053696ULL, 450317203955503ULL, 271063299686173ULL,
+    910340241794202ULL, 4145234574853890ULL, 2059755654702755ULL, 626530377112246ULL,
+    188918989156857ULL, 3316657461542117ULL, 778033563170765ULL, 3568562306532187ULL,
+    2888619469733481ULL, 4364919962337ULL, 4095057288587059ULL, 2275461355379988ULL,
+    1507422995910897ULL, 3737691697116252ULL, 28779913258578ULL, 131453301647952ULL,
+    3613515597508469ULL, 2389606941441321ULL, 2135459302594806ULL, 105517262484263ULL,
+    2973432939331401ULL, 3447096622477885ULL, 684654106536844ULL, 2815198316729695ULL,
+    280303067216071ULL, 1841014812927024ULL, 1181026273060917ULL, 4092989148457730ULL,
+    1381045116206278ULL, 112475725893965ULL, 2309144740156686ULL, 1558825847609352ULL,
+    2008068002046292ULL, 3153511625856423ULL, 38469701427673ULL, 4240572315518056ULL,
+    2295170987320580ULL, 187734093837094ULL, 301041528077172ULL, 234553141005715ULL,
+    4170513699279606ULL, 1600132848196146ULL, 3149113064155689ULL, 2733255352600949ULL,
+    144915931419495ULL, 1221012073888926ULL, 4395668111081710ULL, 2464799161496070ULL,
+    3664256125241313ULL, 239705368981290ULL, 1415181408539490ULL, 2551836620449074ULL,
+    3003106895689578ULL, 968947218886924ULL, 270781532362673ULL, 2905980714350372ULL,
+    3246927349288975ULL, 2653377642686974ULL, 1577457093418263ULL, 279488238785848ULL,
+    568335962564552ULL, 4251365041645758ULL, 1257832559776007ULL, 2424022444243863ULL,
+    261166122046343ULL, 4399874608082116ULL, 640509987891568ULL, 3119706885332220ULL,
+    1990185416694007ULL, 119390098529341ULL, 220106534694050ULL, 937225880034895ULL,
+    656288151358882ULL, 1766967254772100ULL, 197900790969750ULL, 2992539221608875ULL,
+    3960297171111858ULL, 3499202002925081ULL, 1103060980924705ULL, 13670895919578ULL,
+    430132744187721ULL, 1206771838050953ULL, 2474749300167198ULL, 296299539510780ULL,
+    61565517686436ULL, 752778559080573ULL, 3049015829565410ULL, 3538647632527371ULL,
+    1640473028662032ULL, 182488721849306ULL, 1234378482161516ULL, 3736205988606381ULL,
+    2814216844344487ULL, 3877249891529557ULL, 51681412928433ULL, 4275336620301239ULL,
+    3084074032750651ULL, 42732308350456ULL, 3648603591552229ULL, 142450621701603ULL,
+    4020045475009854ULL, 1050293952073054ULL, 1974773673079851ULL, 1815515638724020ULL,
+    104845375825434ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_g_pow2_128_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1277614565900951U,
-    (uint64_t)378671684419493U, (uint64_t)3176260448102880U, (uint64_t)1575691435565077U,
-    (uint64_t)167304528382180U, (uint64_t)2600787765776588U, (uint64_t)7497946149293U,
-    (uint64_t)2184272641272202U, (uint64_t)2200235265236628U, (uint64_t)265969268774814U,
-    (uint64_t)1913228635640715U, (uint64_t)2831959046949342U, (uint64_t)888030405442963U,
-    (uint64_t)1817092932985033U, (uint64_t)101515844997121U, (uint64_t)3309468394859588U,
-    (uint64_t)3965334773689948U, (uint64_t)1945272965790738U, (uint64_t)4450939211427964U,
-    (uint64_t)211349698782702U, (uint64_t)2085160302160079U, (uint64_t)212812506072603U,
-    (uint64_t)3646122434511764U, (uint64_t)1711405092320514U, (uint64_t)95160920508464U,
-    (uint64_t)1677683368518073U, (uint64_t)4384656939250953U, (uint64_t)3548591046529893U,
-    (uint64_t)1683233536091384U, (uint64_t)105919586159941U, (uint64_t)1941416002726455U,
-    (uint64_t)246264372248216U, (uint64_t)3063044110922228U, (uint64_t)3772292170415825U,
-    (uint64_t)222933374989815U, (uint64_t)2417211163452935U, (uint64_t)2018230365573200U,
-    (uint64_t)1985974538911047U, (uint64_t)1387197705332739U, (uint64_t)186400825584956U,
-    (uint64_t)2469330487750329U, (uint64_t)1291983813301638U, (uint64_t)333416733706302U,
-    (uint64_t)3413315564261070U, (uint64_t)189444777569683U, (uint64_t)1062005622360420U,
-    (uint64_t)1800197715938740U, (uint64_t)3693110992551647U, (uint64_t)626990328941945U,
-    (uint64_t)40998857100520U, (uint64_t)3921983552805085U, (uint64_t)1016632437340656U,
-    (uint64_t)4016615929950878U, (uint64_t)2682554586771281U, (uint64_t)7043555162389U,
-    (uint64_t)3333819830676567U, (uint64_t)4120091964944036U, (uint64_t)1960788263484015U,
-    (uint64_t)1642145656273304U, (uint64_t)252814075789128U, (uint64_t)3085777342821357U,
-    (uint64_t)4166637997604052U, (uint64_t)1339401689756469U, (uint64_t)845938529607551U,
-    (uint64_t)223351828189283U, (uint64_t)1148648705186890U, (uint64_t)1230525014760605U,
-    (uint64_t)1869739475126720U, (uint64_t)4193966261205530U, (uint64_t)175684010336013U,
-    (uint64_t)4476719358931508U, (uint64_t)4209547487457638U, (uint64_t)2197536411673724U,
-    (uint64_t)3010838433412303U, (uint64_t)169318997251483U, (uint64_t)49493868302162U,
-    (uint64_t)3594601099078584U, (uint64_t)3662420905445942U, (uint64_t)3606544932233685U,
-    (uint64_t)270643652662165U, (uint64_t)180681786228544U, (uint64_t)2095882682308564U,
-    (uint64_t)813484483841391U, (uint64_t)1622665392824698U, (uint64_t)113821770225137U,
-    (uint64_t)3075432444115417U, (uint64_t)716502989978722U, (uint64_t)2304779892217245U,
-    (uint64_t)1760144151770127U, (uint64_t)235719156963938U, (uint64_t)3180013070471143U,
-    (uint64_t)1331027634540579U, (uint64_t)552273022992392U, (uint64_t)2858693077461887U,
-    (uint64_t)197914407731510U, (uint64_t)187252310910959U, (uint64_t)4160637171377125U,
-    (uint64_t)3225059526713298U, (uint64_t)2574558217383978U, (uint64_t)249695600622489U,
-    (uint64_t)364988742814327U, (uint64_t)4245298536326258U, (uint64_t)1812464706589342U,
-    (uint64_t)2734857123772998U, (uint64_t)120105577124628U, (uint64_t)160179251271109U,
-    (uint64_t)3604555733307834U, (uint64_t)150380003195715U, (uint64_t)1574304909935121U,
-    (uint64_t)142190285600761U, (uint64_t)1835385847725651U, (uint64_t)3168087139615901U,
-    (uint64_t)3201434861713736U, (uint64_t)741757984537760U, (uint64_t)163585009419543U,
-    (uint64_t)3837997981109783U, (uint64_t)3771946407870997U, (uint64_t)2867641360295452U,
-    (uint64_t)3097548691501578U, (uint64_t)124624912142104U, (uint64_t)2729896088769328U,
-    (uint64_t)1087786827035225U, (uint64_t)3934000813818614U, (uint64_t)1176792318645055U,
-    (uint64_t)125311882169270U, (uint64_t)3530709439299502U, (uint64_t)1561477829834527U,
-    (uint64_t)3927894570196761U, (uint64_t)3957765307669212U, (uint64_t)105720519513730U,
-    (uint64_t)3758969845816997U, (uint64_t)2738320452287300U, (uint64_t)2380753632109507U,
-    (uint64_t)2762090901149075U, (uint64_t)123455059136515U, (uint64_t)4222807813169807U,
-    (uint64_t)118064783651432U, (uint64_t)2877694712254934U, (uint64_t)3535027426396448U,
-    (uint64_t)100175663703417U, (uint64_t)3287921121213155U, (uint64_t)4497246481824206U,
-    (uint64_t)1960809949007025U, (uint64_t)3236854264159102U, (uint64_t)35028112623717U,
-    (uint64_t)338838627913273U, (uint64_t)2827531947914645U, (uint64_t)4231826783810670U,
-    (uint64_t)1082490106100389U, (uint64_t)13267544387448U, (uint64_t)4249975884259105U,
-    (uint64_t)2844862161652484U, (uint64_t)262742197948971U, (uint64_t)3525653802457116U,
-    (uint64_t)269963889261701U, (uint64_t)3690062482117102U, (uint64_t)675413453822147U,
-    (uint64_t)2170937868437574U, (uint64_t)2367632187022010U, (uint64_t)214032802409445U,
-    (uint64_t)2054007379612477U, (uint64_t)3558050826739009U, (uint64_t)266827184752634U,
-    (uint64_t)1946520293291195U, (uint64_t)238087872386556U, (uint64_t)490056555385700U,
-    (uint64_t)794405769357386U, (uint64_t)3886901294859702U, (uint64_t)3120414548626348U,
-    (uint64_t)84316625221136U, (uint64_t)223073962531835U, (uint64_t)4280846460577631U,
-    (uint64_t)344296282849308U, (uint64_t)3522116652699457U, (uint64_t)171817232053075U,
-    (uint64_t)3296636283062273U, (uint64_t)3587303364425579U, (uint64_t)1033485783633331U,
-    (uint64_t)3686984130812906U, (uint64_t)268290803650477U, (uint64_t)2803988215834467U,
-    (uint64_t)3821246410529720U, (uint64_t)1077722388925870U, (uint64_t)4187137036866164U,
-    (uint64_t)104696540795905U, (uint64_t)998770003854764U, (uint64_t)3960768137535019U,
-    (uint64_t)4293792474919135U, (uint64_t)3251297981727034U, (uint64_t)192479028790101U,
-    (uint64_t)1175880869349935U, (uint64_t)3506949259311937U, (uint64_t)2161711516160714U,
-    (uint64_t)2506820922270187U, (uint64_t)131002200661047U, (uint64_t)3532399477339994U,
-    (uint64_t)2515815721228719U, (uint64_t)4274974119021502U, (uint64_t)265752394510924U,
-    (uint64_t)163144272153395U, (uint64_t)2824260010502991U, (uint64_t)517077012665142U,
-    (uint64_t)602987073882924U, (uint64_t)2939630061751780U, (uint64_t)59211609557440U,
-    (uint64_t)963423614549333U, (uint64_t)495476232754434U, (uint64_t)94274496109103U,
-    (uint64_t)2245136222990187U, (uint64_t)185414764872288U, (uint64_t)2266067668609289U,
-    (uint64_t)3873978896235927U, (uint64_t)4428283513152105U, (uint64_t)3881481480259312U,
-    (uint64_t)207746202010862U, (uint64_t)1609437858011364U, (uint64_t)477585758421515U,
-    (uint64_t)3850430788664649U, (uint64_t)2682299074459173U, (uint64_t)149439089751274U,
-    (uint64_t)3665760243877698U, (uint64_t)1356661512658931U, (uint64_t)1675903262368322U,
-    (uint64_t)3355649228050892U, (uint64_t)99772108898412U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    1277614565900951ULL, 378671684419493ULL, 3176260448102880ULL, 1575691435565077ULL,
+    167304528382180ULL, 2600787765776588ULL, 7497946149293ULL, 2184272641272202ULL,
+    2200235265236628ULL, 265969268774814ULL, 1913228635640715ULL, 2831959046949342ULL,
+    888030405442963ULL, 1817092932985033ULL, 101515844997121ULL, 3309468394859588ULL,
+    3965334773689948ULL, 1945272965790738ULL, 4450939211427964ULL, 211349698782702ULL,
+    2085160302160079ULL, 212812506072603ULL, 3646122434511764ULL, 1711405092320514ULL,
+    95160920508464ULL, 1677683368518073ULL, 4384656939250953ULL, 3548591046529893ULL,
+    1683233536091384ULL, 105919586159941ULL, 1941416002726455ULL, 246264372248216ULL,
+    3063044110922228ULL, 3772292170415825ULL, 222933374989815ULL, 2417211163452935ULL,
+    2018230365573200ULL, 1985974538911047ULL, 1387197705332739ULL, 186400825584956ULL,
+    2469330487750329ULL, 1291983813301638ULL, 333416733706302ULL, 3413315564261070ULL,
+    189444777569683ULL, 1062005622360420ULL, 1800197715938740ULL, 3693110992551647ULL,
+    626990328941945ULL, 40998857100520ULL, 3921983552805085ULL, 1016632437340656ULL,
+    4016615929950878ULL, 2682554586771281ULL, 7043555162389ULL, 3333819830676567ULL,
+    4120091964944036ULL, 1960788263484015ULL, 1642145656273304ULL, 252814075789128ULL,
+    3085777342821357ULL, 4166637997604052ULL, 1339401689756469ULL, 845938529607551ULL,
+    223351828189283ULL, 1148648705186890ULL, 1230525014760605ULL, 1869739475126720ULL,
+    4193966261205530ULL, 175684010336013ULL, 4476719358931508ULL, 4209547487457638ULL,
+    2197536411673724ULL, 3010838433412303ULL, 169318997251483ULL, 49493868302162ULL,
+    3594601099078584ULL, 3662420905445942ULL, 3606544932233685ULL, 270643652662165ULL,
+    180681786228544ULL, 2095882682308564ULL, 813484483841391ULL, 1622665392824698ULL,
+    113821770225137ULL, 3075432444115417ULL, 716502989978722ULL, 2304779892217245ULL,
+    1760144151770127ULL, 235719156963938ULL, 3180013070471143ULL, 1331027634540579ULL,
+    552273022992392ULL, 2858693077461887ULL, 197914407731510ULL, 187252310910959ULL,
+    4160637171377125ULL, 3225059526713298ULL, 2574558217383978ULL, 249695600622489ULL,
+    364988742814327ULL, 4245298536326258ULL, 1812464706589342ULL, 2734857123772998ULL,
+    120105577124628ULL, 160179251271109ULL, 3604555733307834ULL, 150380003195715ULL,
+    1574304909935121ULL, 142190285600761ULL, 1835385847725651ULL, 3168087139615901ULL,
+    3201434861713736ULL, 741757984537760ULL, 163585009419543ULL, 3837997981109783ULL,
+    3771946407870997ULL, 2867641360295452ULL, 3097548691501578ULL, 124624912142104ULL,
+    2729896088769328ULL, 1087786827035225ULL, 3934000813818614ULL, 1176792318645055ULL,
+    125311882169270ULL, 3530709439299502ULL, 1561477829834527ULL, 3927894570196761ULL,
+    3957765307669212ULL, 105720519513730ULL, 3758969845816997ULL, 2738320452287300ULL,
+    2380753632109507ULL, 2762090901149075ULL, 123455059136515ULL, 4222807813169807ULL,
+    118064783651432ULL, 2877694712254934ULL, 3535027426396448ULL, 100175663703417ULL,
+    3287921121213155ULL, 4497246481824206ULL, 1960809949007025ULL, 3236854264159102ULL,
+    35028112623717ULL, 338838627913273ULL, 2827531947914645ULL, 4231826783810670ULL,
+    1082490106100389ULL, 13267544387448ULL, 4249975884259105ULL, 2844862161652484ULL,
+    262742197948971ULL, 3525653802457116ULL, 269963889261701ULL, 3690062482117102ULL,
+    675413453822147ULL, 2170937868437574ULL, 2367632187022010ULL, 214032802409445ULL,
+    2054007379612477ULL, 3558050826739009ULL, 266827184752634ULL, 1946520293291195ULL,
+    238087872386556ULL, 490056555385700ULL, 794405769357386ULL, 3886901294859702ULL,
+    3120414548626348ULL, 84316625221136ULL, 223073962531835ULL, 4280846460577631ULL,
+    344296282849308ULL, 3522116652699457ULL, 171817232053075ULL, 3296636283062273ULL,
+    3587303364425579ULL, 1033485783633331ULL, 3686984130812906ULL, 268290803650477ULL,
+    2803988215834467ULL, 3821246410529720ULL, 1077722388925870ULL, 4187137036866164ULL,
+    104696540795905ULL, 998770003854764ULL, 3960768137535019ULL, 4293792474919135ULL,
+    3251297981727034ULL, 192479028790101ULL, 1175880869349935ULL, 3506949259311937ULL,
+    2161711516160714ULL, 2506820922270187ULL, 131002200661047ULL, 3532399477339994ULL,
+    2515815721228719ULL, 4274974119021502ULL, 265752394510924ULL, 163144272153395ULL,
+    2824260010502991ULL, 517077012665142ULL, 602987073882924ULL, 2939630061751780ULL,
+    59211609557440ULL, 963423614549333ULL, 495476232754434ULL, 94274496109103ULL,
+    2245136222990187ULL, 185414764872288ULL, 2266067668609289ULL, 3873978896235927ULL,
+    4428283513152105ULL, 3881481480259312ULL, 207746202010862ULL, 1609437858011364ULL,
+    477585758421515ULL, 3850430788664649ULL, 2682299074459173ULL, 149439089751274ULL,
+    3665760243877698ULL, 1356661512658931ULL, 1675903262368322ULL, 3355649228050892ULL,
+    99772108898412ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_g_pow2_192_table_w4[240U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)34056422761564U,
-    (uint64_t)3315864838337811U, (uint64_t)3797032336888745U, (uint64_t)2580641850480806U,
-    (uint64_t)208048944042500U, (uint64_t)1233795288689421U, (uint64_t)1048795233382631U,
-    (uint64_t)646545158071530U, (uint64_t)1816025742137285U, (uint64_t)12245672982162U,
-    (uint64_t)2119364213800870U, (uint64_t)2034960311715107U, (uint64_t)3172697815804487U,
-    (uint64_t)4185144850224160U, (uint64_t)2792055915674U, (uint64_t)795534452139321U,
-    (uint64_t)3647836177838185U, (uint64_t)2681403398797991U, (uint64_t)3149264270306207U,
-    (uint64_t)278704080615511U, (uint64_t)2752552368344718U, (uint64_t)1363840972378818U,
-    (uint64_t)1877521512083293U, (uint64_t)1862111388059470U, (uint64_t)36200324115014U,
-    (uint64_t)4183622899327217U, (uint64_t)747381675363076U, (uint64_t)2772916395314624U,
-    (uint64_t)833767013119965U, (uint64_t)246274452928088U, (uint64_t)1526238021297781U,
-    (uint64_t)3327534966022747U, (uint64_t)1169012581910517U, (uint64_t)4430894603030025U,
-    (uint64_t)149242742442115U, (uint64_t)1002569704307172U, (uint64_t)2763252093432365U,
-    (uint64_t)3037748497732938U, (uint64_t)2329811173939457U, (uint64_t)270769113180752U,
-    (uint64_t)4344092461623432U, (uint64_t)892200524589382U, (uint64_t)2511418516713970U,
-    (uint64_t)103575031265398U, (uint64_t)183736033430252U, (uint64_t)583003071257308U,
-    (uint64_t)3357167344738425U, (uint64_t)4038099763242651U, (uint64_t)1776250620957255U,
-    (uint64_t)51334115864192U, (uint64_t)2616405698969611U, (uint64_t)1196364755910565U,
-    (uint64_t)3135228056210500U, (uint64_t)533729417611761U, (uint64_t)86564351229326U,
-    (uint64_t)98936129527281U, (uint64_t)4425305036630677U, (uint64_t)2980296390253408U,
-    (uint64_t)2487091677325739U, (uint64_t)10501977234280U, (uint64_t)1805646499831077U,
-    (uint64_t)3120615962395477U, (uint64_t)3634629685307533U, (uint64_t)3009632755291436U,
-    (uint64_t)16794051906523U, (uint64_t)2465481597883214U, (uint64_t)211492787490403U,
-    (uint64_t)1120942867046103U, (uint64_t)486438308572108U, (uint64_t)76058986271771U,
-    (uint64_t)2435216584587357U, (uint64_t)3076359381968283U, (uint64_t)1071594491489655U,
-    (uint64_t)3148707450339154U, (uint64_t)249332205737851U, (uint64_t)4171051176626809U,
-    (uint64_t)3165176227956388U, (uint64_t)2400901591835233U, (uint64_t)1435783621333022U,
-    (uint64_t)20312753440321U, (uint64_t)1767293887448005U, (uint64_t)685150647587522U,
-    (uint64_t)2957187934449906U, (uint64_t)382661319140439U, (uint64_t)177583591139601U,
-    (uint64_t)2083572648630743U, (uint64_t)1083410277889419U, (uint64_t)4267902097868310U,
-    (uint64_t)679989918385081U, (uint64_t)123155311554032U, (uint64_t)2830267662472020U,
-    (uint64_t)4476040509735924U, (uint64_t)526697201585144U, (uint64_t)3465306430573135U,
-    (uint64_t)2296616218591U, (uint64_t)1270626872734279U, (uint64_t)1049740198790549U,
-    (uint64_t)4197567214843444U, (uint64_t)1962225231320591U, (uint64_t)186125026796856U,
-    (uint64_t)737027567341142U, (uint64_t)4364616098174U, (uint64_t)3618884818756660U,
-    (uint64_t)1236837563717668U, (uint64_t)162873772439548U, (uint64_t)3081542470065122U,
-    (uint64_t)910331750163991U, (uint64_t)2110498143869827U, (uint64_t)3208473121852657U,
-    (uint64_t)94687786224509U, (uint64_t)4113309027567819U, (uint64_t)4272179438357536U,
-    (uint64_t)1857418654076140U, (uint64_t)1672678841741004U, (uint64_t)94482160248411U,
-    (uint64_t)1928652436799020U, (uint64_t)1750866462381515U, (uint64_t)4048060485672270U,
-    (uint64_t)4006680581258587U, (uint64_t)14850434761312U, (uint64_t)2828734997081648U,
-    (uint64_t)1975589525873972U, (uint64_t)3724347738416009U, (uint64_t)597163266689736U,
-    (uint64_t)14568362978551U, (uint64_t)2203865455839744U, (uint64_t)2237034958890595U,
-    (uint64_t)1863572986731818U, (uint64_t)2329774560279041U, (uint64_t)245105447642201U,
-    (uint64_t)2179697447864822U, (uint64_t)1769609498189882U, (uint64_t)1916950746430931U,
-    (uint64_t)847019613787312U, (uint64_t)163210606565100U, (uint64_t)3658248417400062U,
-    (uint64_t)717138296045881U, (uint64_t)42531212306121U, (uint64_t)1040915917097532U,
-    (uint64_t)77364489101310U, (uint64_t)539253504015590U, (uint64_t)732690726289841U,
-    (uint64_t)3401622034697806U, (uint64_t)2864593278358513U, (uint64_t)142611941887017U,
-    (uint64_t)536364617506702U, (uint64_t)845071859974284U, (uint64_t)4461787417089721U,
-    (uint64_t)2633811871939723U, (uint64_t)113619731985610U, (uint64_t)2535870015489566U,
-    (uint64_t)2146224665077830U, (uint64_t)2593725534662047U, (uint64_t)1332349537449710U,
-    (uint64_t)153375287068096U, (uint64_t)3689977177165276U, (uint64_t)3631865615314120U,
-    (uint64_t)184644878348929U, (uint64_t)2220481726602813U, (uint64_t)204002551273091U,
-    (uint64_t)3022560051766785U, (uint64_t)3125940458001213U, (uint64_t)4258299086906325U,
-    (uint64_t)1072471915162030U, (uint64_t)2797562724530U, (uint64_t)3974298156223059U,
-    (uint64_t)1624778551002554U, (uint64_t)3490703864485971U, (uint64_t)2533877484212458U,
-    (uint64_t)176107782538555U, (uint64_t)4275987398312137U, (uint64_t)4397120757693722U,
-    (uint64_t)3001292763847390U, (uint64_t)1556490837621310U, (uint64_t)70442953037671U,
-    (uint64_t)1558915972545974U, (uint64_t)744724505252845U, (uint64_t)2697230204313363U,
-    (uint64_t)3495671924212144U, (uint64_t)95744296878924U, (uint64_t)1508848630912047U,
-    (uint64_t)4163599342850968U, (uint64_t)1234988733935901U, (uint64_t)3789722472212706U,
-    (uint64_t)219522007052022U, (uint64_t)2106597506701262U, (uint64_t)3231115099832239U,
-    (uint64_t)1296436890593905U, (uint64_t)1016795619587656U, (uint64_t)231150565033388U,
-    (uint64_t)4205501688458754U, (uint64_t)2271569140386062U, (uint64_t)3421769599058157U,
-    (uint64_t)4118408853784554U, (uint64_t)276709341465173U, (uint64_t)2681340614854362U,
-    (uint64_t)2514413365628788U, (uint64_t)62294545067341U, (uint64_t)277610220069365U,
-    (uint64_t)252463150123799U, (uint64_t)2547353593759399U, (uint64_t)1857438147448607U,
-    (uint64_t)2964811969681256U, (uint64_t)3303706463835387U, (uint64_t)248936570980853U,
-    (uint64_t)3208982702478009U, (uint64_t)2518671051730787U, (uint64_t)727433853033835U,
-    (uint64_t)1290389308223446U, (uint64_t)220742793981035U, (uint64_t)3851225361654709U,
-    (uint64_t)2307489307934273U, (uint64_t)1151710489948266U, (uint64_t)289775285210516U,
-    (uint64_t)222685002397295U, (uint64_t)1222117478082108U, (uint64_t)2822029169395728U,
-    (uint64_t)1172146252219882U, (uint64_t)2626108105510259U, (uint64_t)209803527887167U,
-    (uint64_t)2718831919953281U, (uint64_t)4348638387588593U, (uint64_t)3761438313263183U,
-    (uint64_t)13169515318095U, (uint64_t)212893621229476U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    34056422761564ULL, 3315864838337811ULL, 3797032336888745ULL, 2580641850480806ULL,
+    208048944042500ULL, 1233795288689421ULL, 1048795233382631ULL, 646545158071530ULL,
+    1816025742137285ULL, 12245672982162ULL, 2119364213800870ULL, 2034960311715107ULL,
+    3172697815804487ULL, 4185144850224160ULL, 2792055915674ULL, 795534452139321ULL,
+    3647836177838185ULL, 2681403398797991ULL, 3149264270306207ULL, 278704080615511ULL,
+    2752552368344718ULL, 1363840972378818ULL, 1877521512083293ULL, 1862111388059470ULL,
+    36200324115014ULL, 4183622899327217ULL, 747381675363076ULL, 2772916395314624ULL,
+    833767013119965ULL, 246274452928088ULL, 1526238021297781ULL, 3327534966022747ULL,
+    1169012581910517ULL, 4430894603030025ULL, 149242742442115ULL, 1002569704307172ULL,
+    2763252093432365ULL, 3037748497732938ULL, 2329811173939457ULL, 270769113180752ULL,
+    4344092461623432ULL, 892200524589382ULL, 2511418516713970ULL, 103575031265398ULL,
+    183736033430252ULL, 583003071257308ULL, 3357167344738425ULL, 4038099763242651ULL,
+    1776250620957255ULL, 51334115864192ULL, 2616405698969611ULL, 1196364755910565ULL,
+    3135228056210500ULL, 533729417611761ULL, 86564351229326ULL, 98936129527281ULL,
+    4425305036630677ULL, 2980296390253408ULL, 2487091677325739ULL, 10501977234280ULL,
+    1805646499831077ULL, 3120615962395477ULL, 3634629685307533ULL, 3009632755291436ULL,
+    16794051906523ULL, 2465481597883214ULL, 211492787490403ULL, 1120942867046103ULL,
+    486438308572108ULL, 76058986271771ULL, 2435216584587357ULL, 3076359381968283ULL,
+    1071594491489655ULL, 3148707450339154ULL, 249332205737851ULL, 4171051176626809ULL,
+    3165176227956388ULL, 2400901591835233ULL, 1435783621333022ULL, 20312753440321ULL,
+    1767293887448005ULL, 685150647587522ULL, 2957187934449906ULL, 382661319140439ULL,
+    177583591139601ULL, 2083572648630743ULL, 1083410277889419ULL, 4267902097868310ULL,
+    679989918385081ULL, 123155311554032ULL, 2830267662472020ULL, 4476040509735924ULL,
+    526697201585144ULL, 3465306430573135ULL, 2296616218591ULL, 1270626872734279ULL,
+    1049740198790549ULL, 4197567214843444ULL, 1962225231320591ULL, 186125026796856ULL,
+    737027567341142ULL, 4364616098174ULL, 3618884818756660ULL, 1236837563717668ULL,
+    162873772439548ULL, 3081542470065122ULL, 910331750163991ULL, 2110498143869827ULL,
+    3208473121852657ULL, 94687786224509ULL, 4113309027567819ULL, 4272179438357536ULL,
+    1857418654076140ULL, 1672678841741004ULL, 94482160248411ULL, 1928652436799020ULL,
+    1750866462381515ULL, 4048060485672270ULL, 4006680581258587ULL, 14850434761312ULL,
+    2828734997081648ULL, 1975589525873972ULL, 3724347738416009ULL, 597163266689736ULL,
+    14568362978551ULL, 2203865455839744ULL, 2237034958890595ULL, 1863572986731818ULL,
+    2329774560279041ULL, 245105447642201ULL, 2179697447864822ULL, 1769609498189882ULL,
+    1916950746430931ULL, 847019613787312ULL, 163210606565100ULL, 3658248417400062ULL,
+    717138296045881ULL, 42531212306121ULL, 1040915917097532ULL, 77364489101310ULL,
+    539253504015590ULL, 732690726289841ULL, 3401622034697806ULL, 2864593278358513ULL,
+    142611941887017ULL, 536364617506702ULL, 845071859974284ULL, 4461787417089721ULL,
+    2633811871939723ULL, 113619731985610ULL, 2535870015489566ULL, 2146224665077830ULL,
+    2593725534662047ULL, 1332349537449710ULL, 153375287068096ULL, 3689977177165276ULL,
+    3631865615314120ULL, 184644878348929ULL, 2220481726602813ULL, 204002551273091ULL,
+    3022560051766785ULL, 3125940458001213ULL, 4258299086906325ULL, 1072471915162030ULL,
+    2797562724530ULL, 3974298156223059ULL, 1624778551002554ULL, 3490703864485971ULL,
+    2533877484212458ULL, 176107782538555ULL, 4275987398312137ULL, 4397120757693722ULL,
+    3001292763847390ULL, 1556490837621310ULL, 70442953037671ULL, 1558915972545974ULL,
+    744724505252845ULL, 2697230204313363ULL, 3495671924212144ULL, 95744296878924ULL,
+    1508848630912047ULL, 4163599342850968ULL, 1234988733935901ULL, 3789722472212706ULL,
+    219522007052022ULL, 2106597506701262ULL, 3231115099832239ULL, 1296436890593905ULL,
+    1016795619587656ULL, 231150565033388ULL, 4205501688458754ULL, 2271569140386062ULL,
+    3421769599058157ULL, 4118408853784554ULL, 276709341465173ULL, 2681340614854362ULL,
+    2514413365628788ULL, 62294545067341ULL, 277610220069365ULL, 252463150123799ULL,
+    2547353593759399ULL, 1857438147448607ULL, 2964811969681256ULL, 3303706463835387ULL,
+    248936570980853ULL, 3208982702478009ULL, 2518671051730787ULL, 727433853033835ULL,
+    1290389308223446ULL, 220742793981035ULL, 3851225361654709ULL, 2307489307934273ULL,
+    1151710489948266ULL, 289775285210516ULL, 222685002397295ULL, 1222117478082108ULL,
+    2822029169395728ULL, 1172146252219882ULL, 2626108105510259ULL, 209803527887167ULL,
+    2718831919953281ULL, 4348638387588593ULL, 3761438313263183ULL, 13169515318095ULL,
+    212893621229476ULL
   };
 
 static const
 uint64_t
 Hacl_K256_PrecompTable_precomp_basepoint_table_w5[480U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)705178180786072U,
-    (uint64_t)3855836460717471U, (uint64_t)4089131105950716U, (uint64_t)3301581525494108U,
-    (uint64_t)133858670344668U, (uint64_t)2199641648059576U, (uint64_t)1278080618437060U,
-    (uint64_t)3959378566518708U, (uint64_t)3455034269351872U, (uint64_t)79417610544803U,
-    (uint64_t)1U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U,
-    (uint64_t)1282049064345544U, (uint64_t)971732600440099U, (uint64_t)1014594595727339U,
-    (uint64_t)4392159187541980U, (uint64_t)268327875692285U, (uint64_t)2411661712280539U,
-    (uint64_t)1092576199280126U, (uint64_t)4328619610718051U, (uint64_t)3535440816471627U,
-    (uint64_t)95182251488556U, (uint64_t)1893725512243753U, (uint64_t)3619861457111820U,
-    (uint64_t)879374960417905U, (uint64_t)2868056058129113U, (uint64_t)273195291893682U,
-    (uint64_t)2044797305960112U, (uint64_t)2357106853933780U, (uint64_t)3563112438336058U,
-    (uint64_t)2430811541762558U, (uint64_t)106443809495428U, (uint64_t)2231357633909668U,
-    (uint64_t)3641705835951936U, (uint64_t)80642569314189U, (uint64_t)2254841882373268U,
-    (uint64_t)149848031966573U, (uint64_t)2304615661367764U, (uint64_t)2410957403736446U,
-    (uint64_t)2712754805859804U, (uint64_t)2440183877540536U, (uint64_t)99784623895865U,
-    (uint64_t)3667773127482758U, (uint64_t)1354899394473308U, (uint64_t)3636602998800808U,
-    (uint64_t)2709296679846364U, (uint64_t)7253362091963U, (uint64_t)3585950735562744U,
-    (uint64_t)935775991758415U, (uint64_t)4108078106735201U, (uint64_t)556081800336307U,
-    (uint64_t)229585977163057U, (uint64_t)4055594186679801U, (uint64_t)1767681004944933U,
-    (uint64_t)1432634922083242U, (uint64_t)534935602949197U, (uint64_t)251753159522567U,
-    (uint64_t)2846474078499321U, (uint64_t)4488649590348702U, (uint64_t)2437476916025038U,
-    (uint64_t)3040577412822874U, (uint64_t)79405234918614U, (uint64_t)3030621226551508U,
-    (uint64_t)2801117003929806U, (uint64_t)1642927515498422U, (uint64_t)2802725079726297U,
-    (uint64_t)8472780626107U, (uint64_t)866068070352655U, (uint64_t)188080768545106U,
-    (uint64_t)2152119998903058U, (uint64_t)3391239985029665U, (uint64_t)23820026013564U,
-    (uint64_t)2965064154891949U, (uint64_t)1846516097921398U, (uint64_t)4418379948133146U,
-    (uint64_t)3137755426942400U, (uint64_t)47705291301781U, (uint64_t)4278533051105665U,
-    (uint64_t)3453643211214931U, (uint64_t)3379734319145156U, (uint64_t)3762442192097039U,
-    (uint64_t)40243003528694U, (uint64_t)4063448994211201U, (uint64_t)5697015368785U,
-    (uint64_t)1006545411838613U, (uint64_t)4242291693755210U, (uint64_t)135184629190512U,
-    (uint64_t)264898689131035U, (uint64_t)611796474823597U, (uint64_t)3255382250029089U,
-    (uint64_t)3490429246984696U, (uint64_t)236558595864362U, (uint64_t)2055934691551704U,
-    (uint64_t)1487711670114502U, (uint64_t)1823930698221632U, (uint64_t)2130937287438472U,
-    (uint64_t)154610053389779U, (uint64_t)2746573287023216U, (uint64_t)2430987262221221U,
-    (uint64_t)1668741642878689U, (uint64_t)904982541243977U, (uint64_t)56087343124948U,
-    (uint64_t)393905062353536U, (uint64_t)412681877350188U, (uint64_t)3153602040979977U,
-    (uint64_t)4466820876224989U, (uint64_t)146579165617857U, (uint64_t)2628741216508991U,
-    (uint64_t)747994231529806U, (uint64_t)750506569317681U, (uint64_t)1887492790748779U,
-    (uint64_t)35259008682771U, (uint64_t)2085116434894208U, (uint64_t)543291398921711U,
-    (uint64_t)1144362007901552U, (uint64_t)679305136036846U, (uint64_t)141090902244489U,
-    (uint64_t)632480954474859U, (uint64_t)2384513102652591U, (uint64_t)2225529790159790U,
-    (uint64_t)692258664851625U, (uint64_t)198681843567699U, (uint64_t)2397092587228181U,
-    (uint64_t)145862822166614U, (uint64_t)196976540479452U, (uint64_t)3321831130141455U,
-    (uint64_t)69266673089832U, (uint64_t)4469644227342284U, (uint64_t)3899271145504796U,
-    (uint64_t)1261890974076660U, (uint64_t)525357673886694U, (uint64_t)182135997828583U,
-    (uint64_t)4292760618810332U, (uint64_t)3404186545541683U, (uint64_t)312297386688768U,
-    (uint64_t)204377466824608U, (uint64_t)230900767857952U, (uint64_t)3871485172339693U,
-    (uint64_t)779449329662955U, (uint64_t)978655822464694U, (uint64_t)2278252139594027U,
-    (uint64_t)104641527040382U, (uint64_t)3528840153625765U, (uint64_t)4484699080275273U,
-    (uint64_t)1463971951102316U, (uint64_t)4013910812844749U, (uint64_t)228915589433620U,
-    (uint64_t)1209641433482461U, (uint64_t)4043178788774759U, (uint64_t)3008668238856634U,
-    (uint64_t)1448425089071412U, (uint64_t)26269719725037U, (uint64_t)3330785027545223U,
-    (uint64_t)852657975349259U, (uint64_t)227245054466105U, (uint64_t)1534632353984777U,
-    (uint64_t)207715098574660U, (uint64_t)3209837527352280U, (uint64_t)4051688046309066U,
-    (uint64_t)3839009590725955U, (uint64_t)1321506437398842U, (uint64_t)68340219159928U,
-    (uint64_t)1806950276956275U, (uint64_t)3923908055275295U, (uint64_t)743963253393575U,
-    (uint64_t)42162407478783U, (uint64_t)261334584474610U, (uint64_t)3728224928885214U,
-    (uint64_t)4004701081842869U, (uint64_t)709043201644674U, (uint64_t)4267294249150171U,
-    (uint64_t)255540582975025U, (uint64_t)875490593722211U, (uint64_t)796393708218375U,
-    (uint64_t)14774425627956U, (uint64_t)1500040516752097U, (uint64_t)141076627721678U,
-    (uint64_t)2634539368480628U, (uint64_t)1106488853550103U, (uint64_t)2346231921151930U,
-    (uint64_t)897108283954283U, (uint64_t)64616679559843U, (uint64_t)400244949840943U,
-    (uint64_t)1731263826831733U, (uint64_t)1649996579904651U, (uint64_t)3643693449640761U,
-    (uint64_t)172543068638991U, (uint64_t)329537981097182U, (uint64_t)2029799860802869U,
-    (uint64_t)4377737515208862U, (uint64_t)29103311051334U, (uint64_t)265583594111499U,
-    (uint64_t)3798074876561255U, (uint64_t)184749333259352U, (uint64_t)3117395073661801U,
-    (uint64_t)3695784565008833U, (uint64_t)64282709896721U, (uint64_t)1618968913246422U,
-    (uint64_t)3185235128095257U, (uint64_t)3288745068118692U, (uint64_t)1963818603508782U,
-    (uint64_t)281054350739495U, (uint64_t)1658639050810346U, (uint64_t)3061097601679552U,
-    (uint64_t)3023781433263746U, (uint64_t)2770283391242475U, (uint64_t)144508864751908U,
-    (uint64_t)173576288079856U, (uint64_t)46114579547054U, (uint64_t)1679480127300211U,
-    (uint64_t)1683062051644007U, (uint64_t)117183826129323U, (uint64_t)1894068608117440U,
-    (uint64_t)3846899838975733U, (uint64_t)4289279019496192U, (uint64_t)176995887914031U,
-    (uint64_t)78074942938713U, (uint64_t)454207263265292U, (uint64_t)972683614054061U,
-    (uint64_t)808474205144361U, (uint64_t)942703935951735U, (uint64_t)134460241077887U,
-    (uint64_t)2104196179349630U, (uint64_t)501632371208418U, (uint64_t)1666838991431177U,
-    (uint64_t)445606193139838U, (uint64_t)73704603396096U, (uint64_t)3140284774064777U,
-    (uint64_t)1356066420820179U, (uint64_t)227054159419281U, (uint64_t)1847611229198687U,
-    (uint64_t)82327838827660U, (uint64_t)3704027573265803U, (uint64_t)1585260489220244U,
-    (uint64_t)4404647914931933U, (uint64_t)2424649827425515U, (uint64_t)206821944206116U,
-    (uint64_t)1508635776287972U, (uint64_t)1933584575629676U, (uint64_t)1903635423783032U,
-    (uint64_t)4193642165165650U, (uint64_t)234321074690644U, (uint64_t)210406774251925U,
-    (uint64_t)1965845668185599U, (uint64_t)3059839433804731U, (uint64_t)1933300510683631U,
-    (uint64_t)150696600689211U, (uint64_t)4069293682158567U, (uint64_t)4346344602660044U,
-    (uint64_t)312200249664561U, (uint64_t)2495020807621840U, (uint64_t)1912707714385U,
-    (uint64_t)299345978159762U, (uint64_t)1164752722686920U, (uint64_t)225322433710338U,
-    (uint64_t)3128747381283759U, (uint64_t)275659067815583U, (uint64_t)1489671057429039U,
-    (uint64_t)1567693343342676U, (uint64_t)921672046098071U, (uint64_t)3707418899384085U,
-    (uint64_t)54646424931593U, (uint64_t)4026733380127147U, (uint64_t)2933435393699231U,
-    (uint64_t)3356593659521967U, (uint64_t)3637750749325529U, (uint64_t)232939412379045U,
-    (uint64_t)2298399636043069U, (uint64_t)270361546063041U, (uint64_t)2523933572551420U,
-    (uint64_t)3456896091572950U, (uint64_t)185447004732850U, (uint64_t)429322937697821U,
-    (uint64_t)2579704215668222U, (uint64_t)695065378803349U, (uint64_t)3987916247731243U,
-    (uint64_t)255159546348233U, (uint64_t)3057777929921282U, (uint64_t)1608970699916312U,
-    (uint64_t)1902369623063807U, (uint64_t)1413619643652777U, (uint64_t)94983996321227U,
-    (uint64_t)2832873179548050U, (uint64_t)4335430233622555U, (uint64_t)1559023976028843U,
-    (uint64_t)3297181988648895U, (uint64_t)100072021232323U, (uint64_t)2124984034109675U,
-    (uint64_t)4501252835618918U, (uint64_t)2053336899483297U, (uint64_t)638807226463876U,
-    (uint64_t)278445213600634U, (uint64_t)2311236445660555U, (uint64_t)303317664040012U,
-    (uint64_t)2659353858089024U, (uint64_t)3598827423980130U, (uint64_t)176059343827873U,
-    (uint64_t)3891639526275437U, (uint64_t)252823982819463U, (uint64_t)3404823300622345U,
-    (uint64_t)2758370772497456U, (uint64_t)91397496598783U, (uint64_t)2248661144141892U,
-    (uint64_t)491087075271969U, (uint64_t)1786344894571315U, (uint64_t)452497694885923U,
-    (uint64_t)34039628873357U, (uint64_t)2116503165025197U, (uint64_t)4436733709429923U,
-    (uint64_t)3045800776819238U, (uint64_t)1385518906078375U, (uint64_t)110495603336764U,
-    (uint64_t)4051447296249587U, (uint64_t)1103557421498625U, (uint64_t)1840785058439622U,
-    (uint64_t)425322753992314U, (uint64_t)98330046771676U, (uint64_t)365407468686431U,
-    (uint64_t)2611246859977123U, (uint64_t)3050253933135339U, (uint64_t)1006482220896688U,
-    (uint64_t)166818196428389U, (uint64_t)3415236093104372U, (uint64_t)1762308883882288U,
-    (uint64_t)1327828123094558U, (uint64_t)3403946425556706U, (uint64_t)96503464455441U,
-    (uint64_t)3893015304031471U, (uint64_t)3740839477490397U, (uint64_t)2411470812852231U,
-    (uint64_t)940927462436211U, (uint64_t)163825285911099U, (uint64_t)1622441495640386U,
-    (uint64_t)850224095680266U, (uint64_t)76199085900939U, (uint64_t)1941852365144042U,
-    (uint64_t)140326673652807U, (uint64_t)3161611011249524U, (uint64_t)317297150009965U,
-    (uint64_t)2145053259340619U, (uint64_t)2180498176457552U, (uint64_t)38457740506224U,
-    (uint64_t)394174899129468U, (uint64_t)2687474560485245U, (uint64_t)1542175980184516U,
-    (uint64_t)1628502671124819U, (uint64_t)48477401124385U, (uint64_t)4474181600025082U,
-    (uint64_t)2142747956365708U, (uint64_t)1638299432475478U, (uint64_t)2005869320353249U,
-    (uint64_t)112292630760956U, (uint64_t)1887521965171588U, (uint64_t)457587531429696U,
-    (uint64_t)840994209504042U, (uint64_t)4268060856325798U, (uint64_t)195597993440388U,
-    (uint64_t)4148484749020338U, (uint64_t)2074885000909672U, (uint64_t)2309839019263165U,
-    (uint64_t)2087616209681024U, (uint64_t)257214370719966U, (uint64_t)2331363508376581U,
-    (uint64_t)1233124357504711U, (uint64_t)2849542202650296U, (uint64_t)3790982825325736U,
-    (uint64_t)13381453503890U, (uint64_t)1665246594531069U, (uint64_t)4165624287443904U,
-    (uint64_t)3418759698027493U, (uint64_t)2118493255117399U, (uint64_t)136249206366067U,
-    (uint64_t)4064050233283309U, (uint64_t)1368779887911300U, (uint64_t)4370550759530269U,
-    (uint64_t)66992990631341U, (uint64_t)84442368922270U, (uint64_t)2139322635321394U,
-    (uint64_t)2076163483726795U, (uint64_t)657097866349103U, (uint64_t)2095579409488071U,
-    (uint64_t)226525774791341U, (uint64_t)4445744257665359U, (uint64_t)2035752839278107U,
-    (uint64_t)1998242662838304U, (uint64_t)1601548415521694U, (uint64_t)151297684296198U,
-    (uint64_t)1350963039017303U, (uint64_t)2624916349548281U, (uint64_t)2018863259670197U,
-    (uint64_t)2717274357461290U, (uint64_t)94024796961533U, (uint64_t)711335520409111U,
-    (uint64_t)4322093765820263U, (uint64_t)2041650358174649U, (uint64_t)3439791603157577U,
-    (uint64_t)179292018616267U, (uint64_t)2436436921286669U, (uint64_t)3905268797208340U,
-    (uint64_t)2829194895162985U, (uint64_t)1355175382191543U, (uint64_t)55128779761539U,
-    (uint64_t)2648428998786922U, (uint64_t)869805912573515U, (uint64_t)3706708942847864U,
-    (uint64_t)2785288916584667U, (uint64_t)37156862850147U, (uint64_t)1422245336293228U,
-    (uint64_t)4497066058933021U, (uint64_t)85588912978349U, (uint64_t)2616252221194611U,
-    (uint64_t)53506393720989U, (uint64_t)3727539190732644U, (uint64_t)872132446545237U,
-    (uint64_t)933583590986077U, (uint64_t)3794591170581203U, (uint64_t)167875550514069U,
-    (uint64_t)2267466834993297U, (uint64_t)3072652681756816U, (uint64_t)2108499037430803U,
-    (uint64_t)1606735192928366U, (uint64_t)72339568815255U, (uint64_t)3258484260684219U,
-    (uint64_t)3277927277719855U, (uint64_t)2459560373011535U, (uint64_t)1672794293294033U,
-    (uint64_t)227460934880669U, (uint64_t)3702454405413705U, (uint64_t)106168148441676U,
-    (uint64_t)1356617643071159U, (uint64_t)3280896569942762U, (uint64_t)142618711614302U,
-    (uint64_t)4291782740862057U, (uint64_t)4141020884874235U, (uint64_t)3720787221267125U,
-    (uint64_t)552884940089351U, (uint64_t)174626154407180U, (uint64_t)972071013326540U,
-    (uint64_t)4458530419931903U, (uint64_t)4435168973822858U, (uint64_t)1902967548748411U,
-    (uint64_t)53007977605840U, (uint64_t)2453997334323925U, (uint64_t)3653077937283262U,
-    (uint64_t)850660265046356U, (uint64_t)312721924805450U, (uint64_t)268503679240683U,
-    (uint64_t)256960167714122U, (uint64_t)1474492507858350U, (uint64_t)2456345526438488U,
-    (uint64_t)3686029507160255U, (uint64_t)279158933010398U, (uint64_t)3646946293948063U,
-    (uint64_t)704477527214036U, (uint64_t)3387744169891031U, (uint64_t)3772622670980241U,
-    (uint64_t)136368897543304U, (uint64_t)3744894052577607U, (uint64_t)1976007214443430U,
-    (uint64_t)2090045379763451U, (uint64_t)968565474458988U, (uint64_t)234295114806066U
+    0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
+    705178180786072ULL, 3855836460717471ULL, 4089131105950716ULL, 3301581525494108ULL,
+    133858670344668ULL, 2199641648059576ULL, 1278080618437060ULL, 3959378566518708ULL,
+    3455034269351872ULL, 79417610544803ULL, 1ULL, 0ULL, 0ULL, 0ULL, 0ULL, 1282049064345544ULL,
+    971732600440099ULL, 1014594595727339ULL, 4392159187541980ULL, 268327875692285ULL,
+    2411661712280539ULL, 1092576199280126ULL, 4328619610718051ULL, 3535440816471627ULL,
+    95182251488556ULL, 1893725512243753ULL, 3619861457111820ULL, 879374960417905ULL,
+    2868056058129113ULL, 273195291893682ULL, 2044797305960112ULL, 2357106853933780ULL,
+    3563112438336058ULL, 2430811541762558ULL, 106443809495428ULL, 2231357633909668ULL,
+    3641705835951936ULL, 80642569314189ULL, 2254841882373268ULL, 149848031966573ULL,
+    2304615661367764ULL, 2410957403736446ULL, 2712754805859804ULL, 2440183877540536ULL,
+    99784623895865ULL, 3667773127482758ULL, 1354899394473308ULL, 3636602998800808ULL,
+    2709296679846364ULL, 7253362091963ULL, 3585950735562744ULL, 935775991758415ULL,
+    4108078106735201ULL, 556081800336307ULL, 229585977163057ULL, 4055594186679801ULL,
+    1767681004944933ULL, 1432634922083242ULL, 534935602949197ULL, 251753159522567ULL,
+    2846474078499321ULL, 4488649590348702ULL, 2437476916025038ULL, 3040577412822874ULL,
+    79405234918614ULL, 3030621226551508ULL, 2801117003929806ULL, 1642927515498422ULL,
+    2802725079726297ULL, 8472780626107ULL, 866068070352655ULL, 188080768545106ULL,
+    2152119998903058ULL, 3391239985029665ULL, 23820026013564ULL, 2965064154891949ULL,
+    1846516097921398ULL, 4418379948133146ULL, 3137755426942400ULL, 47705291301781ULL,
+    4278533051105665ULL, 3453643211214931ULL, 3379734319145156ULL, 3762442192097039ULL,
+    40243003528694ULL, 4063448994211201ULL, 5697015368785ULL, 1006545411838613ULL,
+    4242291693755210ULL, 135184629190512ULL, 264898689131035ULL, 611796474823597ULL,
+    3255382250029089ULL, 3490429246984696ULL, 236558595864362ULL, 2055934691551704ULL,
+    1487711670114502ULL, 1823930698221632ULL, 2130937287438472ULL, 154610053389779ULL,
+    2746573287023216ULL, 2430987262221221ULL, 1668741642878689ULL, 904982541243977ULL,
+    56087343124948ULL, 393905062353536ULL, 412681877350188ULL, 3153602040979977ULL,
+    4466820876224989ULL, 146579165617857ULL, 2628741216508991ULL, 747994231529806ULL,
+    750506569317681ULL, 1887492790748779ULL, 35259008682771ULL, 2085116434894208ULL,
+    543291398921711ULL, 1144362007901552ULL, 679305136036846ULL, 141090902244489ULL,
+    632480954474859ULL, 2384513102652591ULL, 2225529790159790ULL, 692258664851625ULL,
+    198681843567699ULL, 2397092587228181ULL, 145862822166614ULL, 196976540479452ULL,
+    3321831130141455ULL, 69266673089832ULL, 4469644227342284ULL, 3899271145504796ULL,
+    1261890974076660ULL, 525357673886694ULL, 182135997828583ULL, 4292760618810332ULL,
+    3404186545541683ULL, 312297386688768ULL, 204377466824608ULL, 230900767857952ULL,
+    3871485172339693ULL, 779449329662955ULL, 978655822464694ULL, 2278252139594027ULL,
+    104641527040382ULL, 3528840153625765ULL, 4484699080275273ULL, 1463971951102316ULL,
+    4013910812844749ULL, 228915589433620ULL, 1209641433482461ULL, 4043178788774759ULL,
+    3008668238856634ULL, 1448425089071412ULL, 26269719725037ULL, 3330785027545223ULL,
+    852657975349259ULL, 227245054466105ULL, 1534632353984777ULL, 207715098574660ULL,
+    3209837527352280ULL, 4051688046309066ULL, 3839009590725955ULL, 1321506437398842ULL,
+    68340219159928ULL, 1806950276956275ULL, 3923908055275295ULL, 743963253393575ULL,
+    42162407478783ULL, 261334584474610ULL, 3728224928885214ULL, 4004701081842869ULL,
+    709043201644674ULL, 4267294249150171ULL, 255540582975025ULL, 875490593722211ULL,
+    796393708218375ULL, 14774425627956ULL, 1500040516752097ULL, 141076627721678ULL,
+    2634539368480628ULL, 1106488853550103ULL, 2346231921151930ULL, 897108283954283ULL,
+    64616679559843ULL, 400244949840943ULL, 1731263826831733ULL, 1649996579904651ULL,
+    3643693449640761ULL, 172543068638991ULL, 329537981097182ULL, 2029799860802869ULL,
+    4377737515208862ULL, 29103311051334ULL, 265583594111499ULL, 3798074876561255ULL,
+    184749333259352ULL, 3117395073661801ULL, 3695784565008833ULL, 64282709896721ULL,
+    1618968913246422ULL, 3185235128095257ULL, 3288745068118692ULL, 1963818603508782ULL,
+    281054350739495ULL, 1658639050810346ULL, 3061097601679552ULL, 3023781433263746ULL,
+    2770283391242475ULL, 144508864751908ULL, 173576288079856ULL, 46114579547054ULL,
+    1679480127300211ULL, 1683062051644007ULL, 117183826129323ULL, 1894068608117440ULL,
+    3846899838975733ULL, 4289279019496192ULL, 176995887914031ULL, 78074942938713ULL,
+    454207263265292ULL, 972683614054061ULL, 808474205144361ULL, 942703935951735ULL,
+    134460241077887ULL, 2104196179349630ULL, 501632371208418ULL, 1666838991431177ULL,
+    445606193139838ULL, 73704603396096ULL, 3140284774064777ULL, 1356066420820179ULL,
+    227054159419281ULL, 1847611229198687ULL, 82327838827660ULL, 3704027573265803ULL,
+    1585260489220244ULL, 4404647914931933ULL, 2424649827425515ULL, 206821944206116ULL,
+    1508635776287972ULL, 1933584575629676ULL, 1903635423783032ULL, 4193642165165650ULL,
+    234321074690644ULL, 210406774251925ULL, 1965845668185599ULL, 3059839433804731ULL,
+    1933300510683631ULL, 150696600689211ULL, 4069293682158567ULL, 4346344602660044ULL,
+    312200249664561ULL, 2495020807621840ULL, 1912707714385ULL, 299345978159762ULL,
+    1164752722686920ULL, 225322433710338ULL, 3128747381283759ULL, 275659067815583ULL,
+    1489671057429039ULL, 1567693343342676ULL, 921672046098071ULL, 3707418899384085ULL,
+    54646424931593ULL, 4026733380127147ULL, 2933435393699231ULL, 3356593659521967ULL,
+    3637750749325529ULL, 232939412379045ULL, 2298399636043069ULL, 270361546063041ULL,
+    2523933572551420ULL, 3456896091572950ULL, 185447004732850ULL, 429322937697821ULL,
+    2579704215668222ULL, 695065378803349ULL, 3987916247731243ULL, 255159546348233ULL,
+    3057777929921282ULL, 1608970699916312ULL, 1902369623063807ULL, 1413619643652777ULL,
+    94983996321227ULL, 2832873179548050ULL, 4335430233622555ULL, 1559023976028843ULL,
+    3297181988648895ULL, 100072021232323ULL, 2124984034109675ULL, 4501252835618918ULL,
+    2053336899483297ULL, 638807226463876ULL, 278445213600634ULL, 2311236445660555ULL,
+    303317664040012ULL, 2659353858089024ULL, 3598827423980130ULL, 176059343827873ULL,
+    3891639526275437ULL, 252823982819463ULL, 3404823300622345ULL, 2758370772497456ULL,
+    91397496598783ULL, 2248661144141892ULL, 491087075271969ULL, 1786344894571315ULL,
+    452497694885923ULL, 34039628873357ULL, 2116503165025197ULL, 4436733709429923ULL,
+    3045800776819238ULL, 1385518906078375ULL, 110495603336764ULL, 4051447296249587ULL,
+    1103557421498625ULL, 1840785058439622ULL, 425322753992314ULL, 98330046771676ULL,
+    365407468686431ULL, 2611246859977123ULL, 3050253933135339ULL, 1006482220896688ULL,
+    166818196428389ULL, 3415236093104372ULL, 1762308883882288ULL, 1327828123094558ULL,
+    3403946425556706ULL, 96503464455441ULL, 3893015304031471ULL, 3740839477490397ULL,
+    2411470812852231ULL, 940927462436211ULL, 163825285911099ULL, 1622441495640386ULL,
+    850224095680266ULL, 76199085900939ULL, 1941852365144042ULL, 140326673652807ULL,
+    3161611011249524ULL, 317297150009965ULL, 2145053259340619ULL, 2180498176457552ULL,
+    38457740506224ULL, 394174899129468ULL, 2687474560485245ULL, 1542175980184516ULL,
+    1628502671124819ULL, 48477401124385ULL, 4474181600025082ULL, 2142747956365708ULL,
+    1638299432475478ULL, 2005869320353249ULL, 112292630760956ULL, 1887521965171588ULL,
+    457587531429696ULL, 840994209504042ULL, 4268060856325798ULL, 195597993440388ULL,
+    4148484749020338ULL, 2074885000909672ULL, 2309839019263165ULL, 2087616209681024ULL,
+    257214370719966ULL, 2331363508376581ULL, 1233124357504711ULL, 2849542202650296ULL,
+    3790982825325736ULL, 13381453503890ULL, 1665246594531069ULL, 4165624287443904ULL,
+    3418759698027493ULL, 2118493255117399ULL, 136249206366067ULL, 4064050233283309ULL,
+    1368779887911300ULL, 4370550759530269ULL, 66992990631341ULL, 84442368922270ULL,
+    2139322635321394ULL, 2076163483726795ULL, 657097866349103ULL, 2095579409488071ULL,
+    226525774791341ULL, 4445744257665359ULL, 2035752839278107ULL, 1998242662838304ULL,
+    1601548415521694ULL, 151297684296198ULL, 1350963039017303ULL, 2624916349548281ULL,
+    2018863259670197ULL, 2717274357461290ULL, 94024796961533ULL, 711335520409111ULL,
+    4322093765820263ULL, 2041650358174649ULL, 3439791603157577ULL, 179292018616267ULL,
+    2436436921286669ULL, 3905268797208340ULL, 2829194895162985ULL, 1355175382191543ULL,
+    55128779761539ULL, 2648428998786922ULL, 869805912573515ULL, 3706708942847864ULL,
+    2785288916584667ULL, 37156862850147ULL, 1422245336293228ULL, 4497066058933021ULL,
+    85588912978349ULL, 2616252221194611ULL, 53506393720989ULL, 3727539190732644ULL,
+    872132446545237ULL, 933583590986077ULL, 3794591170581203ULL, 167875550514069ULL,
+    2267466834993297ULL, 3072652681756816ULL, 2108499037430803ULL, 1606735192928366ULL,
+    72339568815255ULL, 3258484260684219ULL, 3277927277719855ULL, 2459560373011535ULL,
+    1672794293294033ULL, 227460934880669ULL, 3702454405413705ULL, 106168148441676ULL,
+    1356617643071159ULL, 3280896569942762ULL, 142618711614302ULL, 4291782740862057ULL,
+    4141020884874235ULL, 3720787221267125ULL, 552884940089351ULL, 174626154407180ULL,
+    972071013326540ULL, 4458530419931903ULL, 4435168973822858ULL, 1902967548748411ULL,
+    53007977605840ULL, 2453997334323925ULL, 3653077937283262ULL, 850660265046356ULL,
+    312721924805450ULL, 268503679240683ULL, 256960167714122ULL, 1474492507858350ULL,
+    2456345526438488ULL, 3686029507160255ULL, 279158933010398ULL, 3646946293948063ULL,
+    704477527214036ULL, 3387744169891031ULL, 3772622670980241ULL, 136368897543304ULL,
+    3744894052577607ULL, 1976007214443430ULL, 2090045379763451ULL, 968565474458988ULL,
+    234295114806066ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/internal/Hacl_Poly1305_256.h b/include/msvc/internal/Hacl_MAC_Poly1305.h
similarity index 77%
rename from include/internal/Hacl_Poly1305_256.h
rename to include/msvc/internal/Hacl_MAC_Poly1305.h
index 21d78b16..29e1734a 100644
--- a/include/internal/Hacl_Poly1305_256.h
+++ b/include/msvc/internal/Hacl_MAC_Poly1305.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __internal_Hacl_Poly1305_256_H
-#define __internal_Hacl_Poly1305_256_H
+#ifndef __internal_Hacl_MAC_Poly1305_H
+#define __internal_Hacl_MAC_Poly1305_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,21 +35,15 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-#include "../Hacl_Poly1305_256.h"
-#include "libintvector.h"
+#include "../Hacl_MAC_Poly1305.h"
 
-void
-Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc, uint8_t *b);
+void Hacl_MAC_Poly1305_poly1305_init(uint64_t *ctx, uint8_t *key);
 
-void
-Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
-  Lib_IntVector_Intrinsics_vec256 *out,
-  Lib_IntVector_Intrinsics_vec256 *p
-);
+void Hacl_MAC_Poly1305_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx);
 
 #if defined(__cplusplus)
 }
 #endif
 
-#define __internal_Hacl_Poly1305_256_H_DEFINED
+#define __internal_Hacl_MAC_Poly1305_H_DEFINED
 #endif
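
A note on the literal rewrite visible in the table hunks earlier in this patch: the old entries spell each constant as a cast of a plain unsigned decimal literal, while the regenerated code uses the unsigned-long-long suffix. The two forms denote the same 64-bit values; the standalone check below (illustrative only, not part of the patch, with a value taken from the table hunks above) should compile and pass with a C99 toolchain.

#include <assert.h>
#include <stdint.h>

int main(void)
{
  /* Old generator output: cast of an unsigned decimal literal. */
  uint64_t old_style = (uint64_t)4377737515208862U;
  /* New generator output, as emitted throughout this patch. */
  uint64_t new_style = 4377737515208862ULL;
  assert(old_style == new_style); /* same value, same bit pattern */
  return 0;
}
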
diff --git a/include/Hacl_Poly1305_128.h b/include/msvc/internal/Hacl_MAC_Poly1305_Simd128.h
similarity index 72%
rename from include/Hacl_Poly1305_128.h
rename to include/msvc/internal/Hacl_MAC_Poly1305_Simd128.h
index 834d4a8a..fe120e43 100644
--- a/include/Hacl_Poly1305_128.h
+++ b/include/msvc/internal/Hacl_MAC_Poly1305_Simd128.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Poly1305_128_H
-#define __Hacl_Poly1305_128_H
+#ifndef __internal_Hacl_MAC_Poly1305_Simd128_H
+#define __internal_Hacl_MAC_Poly1305_Simd128_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,33 +35,30 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
+#include "../Hacl_MAC_Poly1305_Simd128.h"
 #include "libintvector.h"
 
-typedef Lib_IntVector_Intrinsics_vec128 *Hacl_Poly1305_128_poly1305_ctx;
-
-void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key);
-
-void Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *text);
+void Hacl_MAC_Poly1305_Simd128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc, uint8_t *b);
 
 void
-Hacl_Poly1305_128_poly1305_update(
-  Lib_IntVector_Intrinsics_vec128 *ctx,
-  uint32_t len,
-  uint8_t *text
+Hacl_MAC_Poly1305_Simd128_fmul_r2_normalize(
+  Lib_IntVector_Intrinsics_vec128 *out,
+  Lib_IntVector_Intrinsics_vec128 *p
 );
 
 void
-Hacl_Poly1305_128_poly1305_finish(
+Hacl_MAC_Poly1305_Simd128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key);
+
+void
+Hacl_MAC_Poly1305_Simd128_poly1305_finish(
   uint8_t *tag,
   uint8_t *key,
   Lib_IntVector_Intrinsics_vec128 *ctx
 );
 
-void Hacl_Poly1305_128_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key);
-
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Poly1305_128_H_DEFINED
+#define __internal_Hacl_MAC_Poly1305_Simd128_H_DEFINED
 #endif
diff --git a/include/msvc/Hacl_Poly1305_256.h b/include/msvc/internal/Hacl_MAC_Poly1305_Simd256.h
similarity index 72%
rename from include/msvc/Hacl_Poly1305_256.h
rename to include/msvc/internal/Hacl_MAC_Poly1305_Simd256.h
index 9d1ae8c3..7bf106c1 100644
--- a/include/msvc/Hacl_Poly1305_256.h
+++ b/include/msvc/internal/Hacl_MAC_Poly1305_Simd256.h
@@ -23,8 +23,8 @@
  */
 
 
-#ifndef __Hacl_Poly1305_256_H
-#define __Hacl_Poly1305_256_H
+#ifndef __internal_Hacl_MAC_Poly1305_Simd256_H
+#define __internal_Hacl_MAC_Poly1305_Simd256_H
 
 #if defined(__cplusplus)
 extern "C" {
@@ -35,33 +35,30 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
+#include "../Hacl_MAC_Poly1305_Simd256.h"
 #include "libintvector.h"
 
-typedef Lib_IntVector_Intrinsics_vec256 *Hacl_Poly1305_256_poly1305_ctx;
-
-void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *key);
-
-void Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *text);
+void Hacl_MAC_Poly1305_Simd256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc, uint8_t *b);
 
 void
-Hacl_Poly1305_256_poly1305_update(
-  Lib_IntVector_Intrinsics_vec256 *ctx,
-  uint32_t len,
-  uint8_t *text
+Hacl_MAC_Poly1305_Simd256_fmul_r4_normalize(
+  Lib_IntVector_Intrinsics_vec256 *out,
+  Lib_IntVector_Intrinsics_vec256 *p
 );
 
 void
-Hacl_Poly1305_256_poly1305_finish(
+Hacl_MAC_Poly1305_Simd256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *key);
+
+void
+Hacl_MAC_Poly1305_Simd256_poly1305_finish(
   uint8_t *tag,
   uint8_t *key,
   Lib_IntVector_Intrinsics_vec256 *ctx
 );
 
-void Hacl_Poly1305_256_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key);
-
 #if defined(__cplusplus)
 }
 #endif
 
-#define __Hacl_Poly1305_256_H_DEFINED
+#define __internal_Hacl_MAC_Poly1305_Simd256_H_DEFINED
 #endif
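
The three renames above move the Poly1305 MAC headers under the internal MSVC tree and change their include-guard macros. A minimal compile-time sanity check for code building against the renamed layout could look like the sketch below; it is an illustrative fragment, not part of the patch, and it assumes that include/msvc (plus the krml/ and libintvector.h support headers these files pull in) is on the include path and that the toolchain provides the SIMD intrinsics the Simd128/Simd256 headers require.

#include "internal/Hacl_MAC_Poly1305.h"
#include "internal/Hacl_MAC_Poly1305_Simd128.h"
#include "internal/Hacl_MAC_Poly1305_Simd256.h"

/* Each renamed header defines a *_H_DEFINED marker at the end of its body,
 * so seeing all three confirms the post-rename layout is being compiled. */
#if !defined(__internal_Hacl_MAC_Poly1305_H_DEFINED) \
  || !defined(__internal_Hacl_MAC_Poly1305_Simd128_H_DEFINED) \
  || !defined(__internal_Hacl_MAC_Poly1305_Simd256_H_DEFINED)
#error "expected the renamed internal Poly1305 headers"
#endif

int main(void)
{
  return 0;
}
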
diff --git a/include/msvc/internal/Hacl_P256_PrecompTable.h b/include/msvc/internal/Hacl_P256_PrecompTable.h
index f185c2be..c852ef8c 100644
--- a/include/msvc/internal/Hacl_P256_PrecompTable.h
+++ b/include/msvc/internal/Hacl_P256_PrecompTable.h
@@ -39,476 +39,360 @@ static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_basepoint_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)8784043285714375740U,
-    (uint64_t)8483257759279461889U, (uint64_t)8789745728267363600U, (uint64_t)1770019616739251654U,
-    (uint64_t)15992936863339206154U, (uint64_t)10037038012062884956U,
-    (uint64_t)15197544864945402661U, (uint64_t)9615747158586711429U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)10634854829044225757U, (uint64_t)351552716085025155U, (uint64_t)10645315080955407736U,
-    (uint64_t)3609262091244858135U, (uint64_t)15760741698986874125U,
-    (uint64_t)14936374388219697827U, (uint64_t)15751360096993017895U,
-    (uint64_t)18012233706239762398U, (uint64_t)1993877568177495041U,
-    (uint64_t)10345888787846536528U, (uint64_t)7746511691117935375U,
-    (uint64_t)14517043990409914413U, (uint64_t)14122549297570634151U,
-    (uint64_t)16934610359517083771U, (uint64_t)5724511325497097418U, (uint64_t)8983432969107448705U,
-    (uint64_t)2687429970334080245U, (uint64_t)16525396802810050288U, (uint64_t)7602596488871585854U,
-    (uint64_t)4813919589149203084U, (uint64_t)7680395813780804519U, (uint64_t)6687709583048023590U,
-    (uint64_t)18086445169104142027U, (uint64_t)9637814708330203929U,
-    (uint64_t)14785108459960679090U, (uint64_t)3838023279095023581U, (uint64_t)3555615526157830307U,
-    (uint64_t)5177066488380472871U, (uint64_t)18218186719108038403U,
-    (uint64_t)16281556341699656105U, (uint64_t)1524227924561461191U, (uint64_t)4148060517641909597U,
-    (uint64_t)2858290374115363433U, (uint64_t)8942772026334130620U, (uint64_t)3034451298319885113U,
-    (uint64_t)8447866036736640940U, (uint64_t)11204933433076256578U,
-    (uint64_t)18333595740249588297U, (uint64_t)8259597024804538246U, (uint64_t)9539734295777539786U,
-    (uint64_t)9797290423046626413U, (uint64_t)5777303437849646537U, (uint64_t)8739356909899132020U,
-    (uint64_t)14815960973766782158U, (uint64_t)15286581798204509801U,
-    (uint64_t)17597362577777019682U, (uint64_t)13259283710820519742U,
-    (uint64_t)10501322996899164670U, (uint64_t)1221138904338319642U,
-    (uint64_t)14586685489551951885U, (uint64_t)895326705426031212U, (uint64_t)14398171728560617847U,
-    (uint64_t)9592550823745097391U, (uint64_t)17240998489162206026U, (uint64_t)8085479283308189196U,
-    (uint64_t)14844657737893882826U, (uint64_t)15923425394150618234U,
-    (uint64_t)2997808084773249525U, (uint64_t)494323555453660587U, (uint64_t)1215695327517794764U,
-    (uint64_t)9476207381098391690U, (uint64_t)7480789678419122995U, (uint64_t)15212230329321082489U,
-    (uint64_t)436189395349576388U, (uint64_t)17377474396456660834U, (uint64_t)15237013929655017939U,
-    (uint64_t)11444428846883781676U, (uint64_t)5112749694521428575U, (uint64_t)950829367509872073U,
-    (uint64_t)17665036182057559519U, (uint64_t)17205133339690002313U,
-    (uint64_t)16233765170251334549U, (uint64_t)10122775683257972591U,
-    (uint64_t)3352514236455632420U, (uint64_t)9143148522359954691U, (uint64_t)601191684005658860U,
-    (uint64_t)13398772186646349998U, (uint64_t)15512696600132928431U,
-    (uint64_t)9128416073728948653U, (uint64_t)11233051033546138578U, (uint64_t)6769345682610122833U,
-    (uint64_t)10823233224575054288U, (uint64_t)9997725227559980175U, (uint64_t)6733425642852897415U,
-    (uint64_t)16302206918151466066U, (uint64_t)1669330822143265921U, (uint64_t)2661645605036546002U,
-    (uint64_t)17182558479745802165U, (uint64_t)1165082692376932040U, (uint64_t)9470595929011488359U,
-    (uint64_t)6142147329285324932U, (uint64_t)4829075085998111287U, (uint64_t)10231370681107338930U,
-    (uint64_t)9591876895322495239U, (uint64_t)10316468561384076618U,
-    (uint64_t)11592503647238064235U, (uint64_t)13395813606055179632U, (uint64_t)511127033980815508U,
-    (uint64_t)12434976573147649880U, (uint64_t)3425094795384359127U, (uint64_t)6816971736303023445U,
-    (uint64_t)15444670609021139344U, (uint64_t)9464349818322082360U,
-    (uint64_t)16178216413042376883U, (uint64_t)9595540370774317348U, (uint64_t)7229365182662875710U,
-    (uint64_t)4601177649460012843U, (uint64_t)5455046447382487090U, (uint64_t)10854066421606187521U,
-    (uint64_t)15913416821879788071U, (uint64_t)2297365362023460173U, (uint64_t)2603252216454941350U,
-    (uint64_t)6768791943870490934U, (uint64_t)15705936687122754810U, (uint64_t)9537096567546600694U,
-    (uint64_t)17580538144855035062U, (uint64_t)4496542856965746638U, (uint64_t)8444341625922124942U,
-    (uint64_t)12191263903636183168U, (uint64_t)17427332907535974165U,
-    (uint64_t)14307569739254103736U, (uint64_t)13900598742063266169U,
-    (uint64_t)7176996424355977650U, (uint64_t)5709008170379717479U, (uint64_t)14471312052264549092U,
-    (uint64_t)1464519909491759867U, (uint64_t)3328154641049602121U, (uint64_t)13020349337171136774U,
-    (uint64_t)2772166279972051938U, (uint64_t)10854476939425975292U, (uint64_t)1967189930534630940U,
-    (uint64_t)2802919076529341959U, (uint64_t)14792226094833519208U,
-    (uint64_t)14675640928566522177U, (uint64_t)14838974364643800837U,
-    (uint64_t)17631460696099549980U, (uint64_t)17434186275364935469U,
-    (uint64_t)2665648200587705473U, (uint64_t)13202122464492564051U, (uint64_t)7576287350918073341U,
-    (uint64_t)2272206013910186424U, (uint64_t)14558761641743937843U, (uint64_t)5675729149929979729U,
-    (uint64_t)9043135187561613166U, (uint64_t)11750149293830589225U, (uint64_t)740555197954307911U,
-    (uint64_t)9871738005087190699U, (uint64_t)17178667634283502053U,
-    (uint64_t)18046255991533013265U, (uint64_t)4458222096988430430U, (uint64_t)8452427758526311627U,
-    (uint64_t)13825286929656615266U, (uint64_t)13956286357198391218U,
-    (uint64_t)15875692916799995079U, (uint64_t)10634895319157013920U,
-    (uint64_t)13230116118036304207U, (uint64_t)8795317393614625606U, (uint64_t)7001710806858862020U,
-    (uint64_t)7949746088586183478U, (uint64_t)14677556044923602317U,
-    (uint64_t)11184023437485843904U, (uint64_t)11215864722023085094U,
-    (uint64_t)6444464081471519014U, (uint64_t)1706241174022415217U, (uint64_t)8243975633057550613U,
-    (uint64_t)15502902453836085864U, (uint64_t)3799182188594003953U, (uint64_t)3538840175098724094U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 8784043285714375740ULL, 8483257759279461889ULL, 8789745728267363600ULL,
+    1770019616739251654ULL, 15992936863339206154ULL, 10037038012062884956ULL,
+    15197544864945402661ULL, 9615747158586711429ULL, 1ULL, 18446744069414584320ULL,
+    18446744073709551615ULL, 4294967294ULL, 10634854829044225757ULL, 351552716085025155ULL,
+    10645315080955407736ULL, 3609262091244858135ULL, 15760741698986874125ULL,
+    14936374388219697827ULL, 15751360096993017895ULL, 18012233706239762398ULL,
+    1993877568177495041ULL, 10345888787846536528ULL, 7746511691117935375ULL,
+    14517043990409914413ULL, 14122549297570634151ULL, 16934610359517083771ULL,
+    5724511325497097418ULL, 8983432969107448705ULL, 2687429970334080245ULL, 16525396802810050288ULL,
+    7602596488871585854ULL, 4813919589149203084ULL, 7680395813780804519ULL, 6687709583048023590ULL,
+    18086445169104142027ULL, 9637814708330203929ULL, 14785108459960679090ULL,
+    3838023279095023581ULL, 3555615526157830307ULL, 5177066488380472871ULL, 18218186719108038403ULL,
+    16281556341699656105ULL, 1524227924561461191ULL, 4148060517641909597ULL, 2858290374115363433ULL,
+    8942772026334130620ULL, 3034451298319885113ULL, 8447866036736640940ULL, 11204933433076256578ULL,
+    18333595740249588297ULL, 8259597024804538246ULL, 9539734295777539786ULL, 9797290423046626413ULL,
+    5777303437849646537ULL, 8739356909899132020ULL, 14815960973766782158ULL,
+    15286581798204509801ULL, 17597362577777019682ULL, 13259283710820519742ULL,
+    10501322996899164670ULL, 1221138904338319642ULL, 14586685489551951885ULL, 895326705426031212ULL,
+    14398171728560617847ULL, 9592550823745097391ULL, 17240998489162206026ULL,
+    8085479283308189196ULL, 14844657737893882826ULL, 15923425394150618234ULL,
+    2997808084773249525ULL, 494323555453660587ULL, 1215695327517794764ULL, 9476207381098391690ULL,
+    7480789678419122995ULL, 15212230329321082489ULL, 436189395349576388ULL, 17377474396456660834ULL,
+    15237013929655017939ULL, 11444428846883781676ULL, 5112749694521428575ULL, 950829367509872073ULL,
+    17665036182057559519ULL, 17205133339690002313ULL, 16233765170251334549ULL,
+    10122775683257972591ULL, 3352514236455632420ULL, 9143148522359954691ULL, 601191684005658860ULL,
+    13398772186646349998ULL, 15512696600132928431ULL, 9128416073728948653ULL,
+    11233051033546138578ULL, 6769345682610122833ULL, 10823233224575054288ULL,
+    9997725227559980175ULL, 6733425642852897415ULL, 16302206918151466066ULL, 1669330822143265921ULL,
+    2661645605036546002ULL, 17182558479745802165ULL, 1165082692376932040ULL, 9470595929011488359ULL,
+    6142147329285324932ULL, 4829075085998111287ULL, 10231370681107338930ULL, 9591876895322495239ULL,
+    10316468561384076618ULL, 11592503647238064235ULL, 13395813606055179632ULL,
+    511127033980815508ULL, 12434976573147649880ULL, 3425094795384359127ULL, 6816971736303023445ULL,
+    15444670609021139344ULL, 9464349818322082360ULL, 16178216413042376883ULL,
+    9595540370774317348ULL, 7229365182662875710ULL, 4601177649460012843ULL, 5455046447382487090ULL,
+    10854066421606187521ULL, 15913416821879788071ULL, 2297365362023460173ULL,
+    2603252216454941350ULL, 6768791943870490934ULL, 15705936687122754810ULL, 9537096567546600694ULL,
+    17580538144855035062ULL, 4496542856965746638ULL, 8444341625922124942ULL,
+    12191263903636183168ULL, 17427332907535974165ULL, 14307569739254103736ULL,
+    13900598742063266169ULL, 7176996424355977650ULL, 5709008170379717479ULL,
+    14471312052264549092ULL, 1464519909491759867ULL, 3328154641049602121ULL,
+    13020349337171136774ULL, 2772166279972051938ULL, 10854476939425975292ULL,
+    1967189930534630940ULL, 2802919076529341959ULL, 14792226094833519208ULL,
+    14675640928566522177ULL, 14838974364643800837ULL, 17631460696099549980ULL,
+    17434186275364935469ULL, 2665648200587705473ULL, 13202122464492564051ULL,
+    7576287350918073341ULL, 2272206013910186424ULL, 14558761641743937843ULL, 5675729149929979729ULL,
+    9043135187561613166ULL, 11750149293830589225ULL, 740555197954307911ULL, 9871738005087190699ULL,
+    17178667634283502053ULL, 18046255991533013265ULL, 4458222096988430430ULL,
+    8452427758526311627ULL, 13825286929656615266ULL, 13956286357198391218ULL,
+    15875692916799995079ULL, 10634895319157013920ULL, 13230116118036304207ULL,
+    8795317393614625606ULL, 7001710806858862020ULL, 7949746088586183478ULL, 14677556044923602317ULL,
+    11184023437485843904ULL, 11215864722023085094ULL, 6444464081471519014ULL,
+    1706241174022415217ULL, 8243975633057550613ULL, 15502902453836085864ULL, 3799182188594003953ULL,
+    3538840175098724094ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_g_pow2_64_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1499621593102562565U,
-    (uint64_t)16692369783039433128U, (uint64_t)15337520135922861848U,
-    (uint64_t)5455737214495366228U, (uint64_t)17827017231032529600U,
-    (uint64_t)12413621606240782649U, (uint64_t)2290483008028286132U,
-    (uint64_t)15752017553340844820U, (uint64_t)4846430910634234874U,
-    (uint64_t)10861682798464583253U, (uint64_t)15404737222404363049U, (uint64_t)363586619281562022U,
-    (uint64_t)9866710912401645115U, (uint64_t)1162548847543228595U, (uint64_t)7649967190445130486U,
-    (uint64_t)5212340432230915749U, (uint64_t)7572620550182916491U, (uint64_t)14876145112448665096U,
-    (uint64_t)2063227348838176167U, (uint64_t)3519435548295415847U, (uint64_t)8390400282019023103U,
-    (uint64_t)17666843593163037841U, (uint64_t)9450204148816496323U, (uint64_t)8483374507652916768U,
-    (uint64_t)6254661047265818424U, (uint64_t)16382127809582285023U, (uint64_t)125359443771153172U,
-    (uint64_t)1374336701588437897U, (uint64_t)11362596098420127726U, (uint64_t)2101654420738681387U,
-    (uint64_t)12772780342444840510U, (uint64_t)12546934328908550060U,
-    (uint64_t)8331880412333790397U, (uint64_t)11687262051473819904U, (uint64_t)8926848496503457587U,
-    (uint64_t)9603974142010467857U, (uint64_t)13199952163826973175U, (uint64_t)2189856264898797734U,
-    (uint64_t)11356074861870267226U, (uint64_t)2027714896422561895U, (uint64_t)5261606367808050149U,
-    (uint64_t)153855954337762312U, (uint64_t)6375919692894573986U, (uint64_t)12364041207536146533U,
-    (uint64_t)1891896010455057160U, (uint64_t)1568123795087313171U, (uint64_t)18138710056556660101U,
-    (uint64_t)6004886947510047736U, (uint64_t)4811859325589542932U, (uint64_t)3618763430148954981U,
-    (uint64_t)11434521746258554122U, (uint64_t)10086341535864049427U,
-    (uint64_t)8073421629570399570U, (uint64_t)12680586148814729338U, (uint64_t)9619958020761569612U,
-    (uint64_t)15827203580658384478U, (uint64_t)12832694810937550406U,
-    (uint64_t)14977975484447400910U, (uint64_t)5478002389061063653U,
-    (uint64_t)14731136312639060880U, (uint64_t)4317867687275472033U, (uint64_t)6642650962855259884U,
-    (uint64_t)2514254944289495285U, (uint64_t)14231405641534478436U, (uint64_t)4045448346091518946U,
-    (uint64_t)8985477013445972471U, (uint64_t)8869039454457032149U, (uint64_t)4356978486208692970U,
-    (uint64_t)10805288613335538577U, (uint64_t)12832353127812502042U,
-    (uint64_t)4576590051676547490U, (uint64_t)6728053735138655107U, (uint64_t)17814206719173206184U,
-    (uint64_t)79790138573994940U, (uint64_t)17920293215101822267U, (uint64_t)13422026625585728864U,
-    (uint64_t)5018058010492547271U, (uint64_t)110232326023384102U, (uint64_t)10834264070056942976U,
-    (uint64_t)15222249086119088588U, (uint64_t)15119439519142044997U,
-    (uint64_t)11655511970063167313U, (uint64_t)1614477029450566107U, (uint64_t)3619322817271059794U,
-    (uint64_t)9352862040415412867U, (uint64_t)14017522553242747074U,
-    (uint64_t)13138513643674040327U, (uint64_t)3610195242889455765U, (uint64_t)8371069193996567291U,
-    (uint64_t)12670227996544662654U, (uint64_t)1205961025092146303U,
-    (uint64_t)13106709934003962112U, (uint64_t)4350113471327723407U,
-    (uint64_t)15060941403739680459U, (uint64_t)13639127647823205030U,
-    (uint64_t)10790943339357725715U, (uint64_t)498760574280648264U, (uint64_t)17922071907832082887U,
-    (uint64_t)15122670976670152145U, (uint64_t)6275027991110214322U, (uint64_t)7250912847491816402U,
-    (uint64_t)15206617260142982380U, (uint64_t)3385668313694152877U,
-    (uint64_t)17522479771766801905U, (uint64_t)2965919117476170655U, (uint64_t)1553238516603269404U,
-    (uint64_t)5820770015631050991U, (uint64_t)4999445222232605348U, (uint64_t)9245650860833717444U,
-    (uint64_t)1508811811724230728U, (uint64_t)5190684913765614385U, (uint64_t)15692927070934536166U,
-    (uint64_t)12981978499190500902U, (uint64_t)5143491963193394698U, (uint64_t)7705698092144084129U,
-    (uint64_t)581120653055084783U, (uint64_t)13886552864486459714U, (uint64_t)6290301270652587255U,
-    (uint64_t)8663431529954393128U, (uint64_t)17033405846475472443U, (uint64_t)5206780355442651635U,
-    (uint64_t)12580364474736467688U, (uint64_t)17934601912005283310U,
-    (uint64_t)15119491731028933652U, (uint64_t)17848231399859044858U,
-    (uint64_t)4427673319524919329U, (uint64_t)2673607337074368008U, (uint64_t)14034876464294699949U,
-    (uint64_t)10938948975420813697U, (uint64_t)15202340615298669183U,
-    (uint64_t)5496603454069431071U, (uint64_t)2486526142064906845U, (uint64_t)4507882119510526802U,
-    (uint64_t)13888151172411390059U, (uint64_t)15049027856908071726U,
-    (uint64_t)9667231543181973158U, (uint64_t)6406671575277563202U, (uint64_t)3395801050331215139U,
-    (uint64_t)9813607433539108308U, (uint64_t)2681417728820980381U, (uint64_t)18407064643927113994U,
-    (uint64_t)7707177692113485527U, (uint64_t)14218149384635317074U, (uint64_t)3658668346206375919U,
-    (uint64_t)15404713991002362166U, (uint64_t)10152074687696195207U,
-    (uint64_t)10926946599582128139U, (uint64_t)16907298600007085320U,
-    (uint64_t)16544287219664720279U, (uint64_t)11007075933432813205U,
-    (uint64_t)8652245965145713599U, (uint64_t)7857626748965990384U, (uint64_t)5602306604520095870U,
-    (uint64_t)2525139243938658618U, (uint64_t)14405696176872077447U,
-    (uint64_t)18432270482137885332U, (uint64_t)9913880809120071177U,
-    (uint64_t)16896141737831216972U, (uint64_t)7484791498211214829U,
-    (uint64_t)15635259968266497469U, (uint64_t)8495118537612215624U, (uint64_t)4915477980562575356U,
-    (uint64_t)16453519279754924350U, (uint64_t)14462108244565406969U,
-    (uint64_t)14837837755237096687U, (uint64_t)14130171078892575346U,
-    (uint64_t)15423793222528491497U, (uint64_t)5460399262075036084U,
-    (uint64_t)16085440580308415349U, (uint64_t)26873200736954488U, (uint64_t)5603655807457499550U,
-    (uint64_t)3342202915871129617U, (uint64_t)1604413932150236626U, (uint64_t)9684226585089458974U,
-    (uint64_t)1213229904006618539U, (uint64_t)6782978662408837236U, (uint64_t)11197029877749307372U,
-    (uint64_t)14085968786551657744U, (uint64_t)17352273610494009342U,
-    (uint64_t)7876582961192434984U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 1499621593102562565ULL, 16692369783039433128ULL,
+    15337520135922861848ULL, 5455737214495366228ULL, 17827017231032529600ULL,
+    12413621606240782649ULL, 2290483008028286132ULL, 15752017553340844820ULL,
+    4846430910634234874ULL, 10861682798464583253ULL, 15404737222404363049ULL, 363586619281562022ULL,
+    9866710912401645115ULL, 1162548847543228595ULL, 7649967190445130486ULL, 5212340432230915749ULL,
+    7572620550182916491ULL, 14876145112448665096ULL, 2063227348838176167ULL, 3519435548295415847ULL,
+    8390400282019023103ULL, 17666843593163037841ULL, 9450204148816496323ULL, 8483374507652916768ULL,
+    6254661047265818424ULL, 16382127809582285023ULL, 125359443771153172ULL, 1374336701588437897ULL,
+    11362596098420127726ULL, 2101654420738681387ULL, 12772780342444840510ULL,
+    12546934328908550060ULL, 8331880412333790397ULL, 11687262051473819904ULL,
+    8926848496503457587ULL, 9603974142010467857ULL, 13199952163826973175ULL, 2189856264898797734ULL,
+    11356074861870267226ULL, 2027714896422561895ULL, 5261606367808050149ULL, 153855954337762312ULL,
+    6375919692894573986ULL, 12364041207536146533ULL, 1891896010455057160ULL, 1568123795087313171ULL,
+    18138710056556660101ULL, 6004886947510047736ULL, 4811859325589542932ULL, 3618763430148954981ULL,
+    11434521746258554122ULL, 10086341535864049427ULL, 8073421629570399570ULL,
+    12680586148814729338ULL, 9619958020761569612ULL, 15827203580658384478ULL,
+    12832694810937550406ULL, 14977975484447400910ULL, 5478002389061063653ULL,
+    14731136312639060880ULL, 4317867687275472033ULL, 6642650962855259884ULL, 2514254944289495285ULL,
+    14231405641534478436ULL, 4045448346091518946ULL, 8985477013445972471ULL, 8869039454457032149ULL,
+    4356978486208692970ULL, 10805288613335538577ULL, 12832353127812502042ULL,
+    4576590051676547490ULL, 6728053735138655107ULL, 17814206719173206184ULL, 79790138573994940ULL,
+    17920293215101822267ULL, 13422026625585728864ULL, 5018058010492547271ULL, 110232326023384102ULL,
+    10834264070056942976ULL, 15222249086119088588ULL, 15119439519142044997ULL,
+    11655511970063167313ULL, 1614477029450566107ULL, 3619322817271059794ULL, 9352862040415412867ULL,
+    14017522553242747074ULL, 13138513643674040327ULL, 3610195242889455765ULL,
+    8371069193996567291ULL, 12670227996544662654ULL, 1205961025092146303ULL,
+    13106709934003962112ULL, 4350113471327723407ULL, 15060941403739680459ULL,
+    13639127647823205030ULL, 10790943339357725715ULL, 498760574280648264ULL,
+    17922071907832082887ULL, 15122670976670152145ULL, 6275027991110214322ULL,
+    7250912847491816402ULL, 15206617260142982380ULL, 3385668313694152877ULL,
+    17522479771766801905ULL, 2965919117476170655ULL, 1553238516603269404ULL, 5820770015631050991ULL,
+    4999445222232605348ULL, 9245650860833717444ULL, 1508811811724230728ULL, 5190684913765614385ULL,
+    15692927070934536166ULL, 12981978499190500902ULL, 5143491963193394698ULL,
+    7705698092144084129ULL, 581120653055084783ULL, 13886552864486459714ULL, 6290301270652587255ULL,
+    8663431529954393128ULL, 17033405846475472443ULL, 5206780355442651635ULL,
+    12580364474736467688ULL, 17934601912005283310ULL, 15119491731028933652ULL,
+    17848231399859044858ULL, 4427673319524919329ULL, 2673607337074368008ULL,
+    14034876464294699949ULL, 10938948975420813697ULL, 15202340615298669183ULL,
+    5496603454069431071ULL, 2486526142064906845ULL, 4507882119510526802ULL, 13888151172411390059ULL,
+    15049027856908071726ULL, 9667231543181973158ULL, 6406671575277563202ULL, 3395801050331215139ULL,
+    9813607433539108308ULL, 2681417728820980381ULL, 18407064643927113994ULL, 7707177692113485527ULL,
+    14218149384635317074ULL, 3658668346206375919ULL, 15404713991002362166ULL,
+    10152074687696195207ULL, 10926946599582128139ULL, 16907298600007085320ULL,
+    16544287219664720279ULL, 11007075933432813205ULL, 8652245965145713599ULL,
+    7857626748965990384ULL, 5602306604520095870ULL, 2525139243938658618ULL, 14405696176872077447ULL,
+    18432270482137885332ULL, 9913880809120071177ULL, 16896141737831216972ULL,
+    7484791498211214829ULL, 15635259968266497469ULL, 8495118537612215624ULL, 4915477980562575356ULL,
+    16453519279754924350ULL, 14462108244565406969ULL, 14837837755237096687ULL,
+    14130171078892575346ULL, 15423793222528491497ULL, 5460399262075036084ULL,
+    16085440580308415349ULL, 26873200736954488ULL, 5603655807457499550ULL, 3342202915871129617ULL,
+    1604413932150236626ULL, 9684226585089458974ULL, 1213229904006618539ULL, 6782978662408837236ULL,
+    11197029877749307372ULL, 14085968786551657744ULL, 17352273610494009342ULL,
+    7876582961192434984ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_g_pow2_128_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)14619254753077084366U,
-    (uint64_t)13913835116514008593U, (uint64_t)15060744674088488145U,
-    (uint64_t)17668414598203068685U, (uint64_t)10761169236902342334U,
-    (uint64_t)15467027479157446221U, (uint64_t)14989185522423469618U,
-    (uint64_t)14354539272510107003U, (uint64_t)14298211796392133693U,
-    (uint64_t)13270323784253711450U, (uint64_t)13380964971965046957U,
-    (uint64_t)8686204248456909699U, (uint64_t)17434630286744937066U, (uint64_t)1355903775279084720U,
-    (uint64_t)7554695053550308662U, (uint64_t)11354971222741863570U, (uint64_t)564601613420749879U,
-    (uint64_t)8466325837259054896U, (uint64_t)10752965181772434263U,
-    (uint64_t)11405876547368426319U, (uint64_t)13791894568738930940U,
-    (uint64_t)8230587134406354675U, (uint64_t)12415514098722758608U,
-    (uint64_t)18414183046995786744U, (uint64_t)15508000368227372870U,
-    (uint64_t)5781062464627999307U, (uint64_t)15339429052219195590U,
-    (uint64_t)16038703753810741903U, (uint64_t)9587718938298980714U, (uint64_t)4822658817952386407U,
-    (uint64_t)1376351024833260660U, (uint64_t)1120174910554766702U, (uint64_t)1730170933262569274U,
-    (uint64_t)5187428548444533500U, (uint64_t)16242053503368957131U, (uint64_t)3036811119519868279U,
-    (uint64_t)1760267587958926638U, (uint64_t)170244572981065185U, (uint64_t)8063080791967388171U,
-    (uint64_t)4824892826607692737U, (uint64_t)16286391083472040552U,
-    (uint64_t)11945158615253358747U, (uint64_t)14096887760410224200U,
-    (uint64_t)1613720831904557039U, (uint64_t)14316966673761197523U,
-    (uint64_t)17411006201485445341U, (uint64_t)8112301506943158801U, (uint64_t)2069889233927989984U,
-    (uint64_t)10082848378277483927U, (uint64_t)3609691194454404430U, (uint64_t)6110437205371933689U,
-    (uint64_t)9769135977342231601U, (uint64_t)11977962151783386478U,
-    (uint64_t)18088718692559983573U, (uint64_t)11741637975753055U, (uint64_t)11110390325701582190U,
-    (uint64_t)1341402251566067019U, (uint64_t)3028229550849726478U, (uint64_t)10438984083997451310U,
-    (uint64_t)12730851885100145709U, (uint64_t)11524169532089894189U,
-    (uint64_t)4523375903229602674U, (uint64_t)2028602258037385622U, (uint64_t)17082839063089388410U,
-    (uint64_t)6103921364634113167U, (uint64_t)17066180888225306102U,
-    (uint64_t)11395680486707876195U, (uint64_t)10952892272443345484U,
-    (uint64_t)8792831960605859401U, (uint64_t)14194485427742325139U,
-    (uint64_t)15146020821144305250U, (uint64_t)1654766014957123343U, (uint64_t)7955526243090948551U,
-    (uint64_t)3989277566080493308U, (uint64_t)12229385116397931231U,
-    (uint64_t)13430548930727025562U, (uint64_t)3434892688179800602U, (uint64_t)8431998794645622027U,
-    (uint64_t)12132530981596299272U, (uint64_t)2289461608863966999U,
-    (uint64_t)18345870950201487179U, (uint64_t)13517947207801901576U,
-    (uint64_t)5213113244172561159U, (uint64_t)17632986594098340879U, (uint64_t)4405251818133148856U,
-    (uint64_t)11783009269435447793U, (uint64_t)9332138983770046035U,
-    (uint64_t)12863411548922539505U, (uint64_t)3717030292816178224U,
-    (uint64_t)10026078446427137374U, (uint64_t)11167295326594317220U,
-    (uint64_t)12425328773141588668U, (uint64_t)5760335125172049352U, (uint64_t)9016843701117277863U,
-    (uint64_t)5657892835694680172U, (uint64_t)11025130589305387464U, (uint64_t)1368484957977406173U,
-    (uint64_t)17361351345281258834U, (uint64_t)1907113641956152700U,
-    (uint64_t)16439233413531427752U, (uint64_t)5893322296986588932U,
-    (uint64_t)14000206906171746627U, (uint64_t)14979266987545792900U,
-    (uint64_t)6926291766898221120U, (uint64_t)7162023296083360752U, (uint64_t)14762747553625382529U,
-    (uint64_t)12610831658612406849U, (uint64_t)10462926899548715515U,
-    (uint64_t)4794017723140405312U, (uint64_t)5234438200490163319U, (uint64_t)8019519110339576320U,
-    (uint64_t)7194604241290530100U, (uint64_t)12626770134810813246U,
-    (uint64_t)10793074474236419890U, (uint64_t)11323224347913978783U,
-    (uint64_t)16831128015895380245U, (uint64_t)18323094195124693378U,
-    (uint64_t)2361097165281567692U, (uint64_t)15755578675014279498U,
-    (uint64_t)14289876470325854580U, (uint64_t)12856787656093616839U,
-    (uint64_t)3578928531243900594U, (uint64_t)3847532758790503699U, (uint64_t)8377953190224748743U,
-    (uint64_t)3314546646092744596U, (uint64_t)800810188859334358U, (uint64_t)4626344124229343596U,
-    (uint64_t)6620381605850876621U, (uint64_t)11422073570955989527U,
-    (uint64_t)12676813626484814469U, (uint64_t)16725029886764122240U,
-    (uint64_t)16648497372773830008U, (uint64_t)9135702594931291048U,
-    (uint64_t)16080949688826680333U, (uint64_t)11528096561346602947U,
-    (uint64_t)2632498067099740984U, (uint64_t)11583842699108800714U, (uint64_t)8378404864573610526U,
-    (uint64_t)1076560261627788534U, (uint64_t)13836015994325032828U,
-    (uint64_t)11234295937817067909U, (uint64_t)5893659808396722708U,
-    (uint64_t)11277421142886984364U, (uint64_t)8968549037166726491U,
-    (uint64_t)14841374331394032822U, (uint64_t)9967344773947889341U, (uint64_t)8799244393578496085U,
-    (uint64_t)5094686877301601410U, (uint64_t)8780316747074726862U, (uint64_t)9119697306829835718U,
-    (uint64_t)15381243327921855368U, (uint64_t)2686250164449435196U,
-    (uint64_t)16466917280442198358U, (uint64_t)13791704489163125216U,
-    (uint64_t)16955859337117924272U, (uint64_t)17112836394923783642U,
-    (uint64_t)4639176427338618063U, (uint64_t)16770029310141094964U,
-    (uint64_t)11049953922966416185U, (uint64_t)12012669590884098968U,
-    (uint64_t)4859326885929417214U, (uint64_t)896380084392586061U, (uint64_t)7153028362977034008U,
-    (uint64_t)10540021163316263301U, (uint64_t)9318277998512936585U,
-    (uint64_t)18344496977694796523U, (uint64_t)11374737400567645494U,
-    (uint64_t)17158800051138212954U, (uint64_t)18343197867863253153U,
-    (uint64_t)18204799297967861226U, (uint64_t)15798973531606348828U,
-    (uint64_t)9870158263408310459U, (uint64_t)17578869832774612627U, (uint64_t)8395748875822696932U,
-    (uint64_t)15310679007370670872U, (uint64_t)11205576736030808860U,
-    (uint64_t)10123429210002838967U, (uint64_t)5910544144088393959U,
-    (uint64_t)14016615653353687369U, (uint64_t)11191676704772957822U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 14619254753077084366ULL, 13913835116514008593ULL,
+    15060744674088488145ULL, 17668414598203068685ULL, 10761169236902342334ULL,
+    15467027479157446221ULL, 14989185522423469618ULL, 14354539272510107003ULL,
+    14298211796392133693ULL, 13270323784253711450ULL, 13380964971965046957ULL,
+    8686204248456909699ULL, 17434630286744937066ULL, 1355903775279084720ULL, 7554695053550308662ULL,
+    11354971222741863570ULL, 564601613420749879ULL, 8466325837259054896ULL, 10752965181772434263ULL,
+    11405876547368426319ULL, 13791894568738930940ULL, 8230587134406354675ULL,
+    12415514098722758608ULL, 18414183046995786744ULL, 15508000368227372870ULL,
+    5781062464627999307ULL, 15339429052219195590ULL, 16038703753810741903ULL,
+    9587718938298980714ULL, 4822658817952386407ULL, 1376351024833260660ULL, 1120174910554766702ULL,
+    1730170933262569274ULL, 5187428548444533500ULL, 16242053503368957131ULL, 3036811119519868279ULL,
+    1760267587958926638ULL, 170244572981065185ULL, 8063080791967388171ULL, 4824892826607692737ULL,
+    16286391083472040552ULL, 11945158615253358747ULL, 14096887760410224200ULL,
+    1613720831904557039ULL, 14316966673761197523ULL, 17411006201485445341ULL,
+    8112301506943158801ULL, 2069889233927989984ULL, 10082848378277483927ULL, 3609691194454404430ULL,
+    6110437205371933689ULL, 9769135977342231601ULL, 11977962151783386478ULL,
+    18088718692559983573ULL, 11741637975753055ULL, 11110390325701582190ULL, 1341402251566067019ULL,
+    3028229550849726478ULL, 10438984083997451310ULL, 12730851885100145709ULL,
+    11524169532089894189ULL, 4523375903229602674ULL, 2028602258037385622ULL,
+    17082839063089388410ULL, 6103921364634113167ULL, 17066180888225306102ULL,
+    11395680486707876195ULL, 10952892272443345484ULL, 8792831960605859401ULL,
+    14194485427742325139ULL, 15146020821144305250ULL, 1654766014957123343ULL,
+    7955526243090948551ULL, 3989277566080493308ULL, 12229385116397931231ULL,
+    13430548930727025562ULL, 3434892688179800602ULL, 8431998794645622027ULL,
+    12132530981596299272ULL, 2289461608863966999ULL, 18345870950201487179ULL,
+    13517947207801901576ULL, 5213113244172561159ULL, 17632986594098340879ULL,
+    4405251818133148856ULL, 11783009269435447793ULL, 9332138983770046035ULL,
+    12863411548922539505ULL, 3717030292816178224ULL, 10026078446427137374ULL,
+    11167295326594317220ULL, 12425328773141588668ULL, 5760335125172049352ULL,
+    9016843701117277863ULL, 5657892835694680172ULL, 11025130589305387464ULL, 1368484957977406173ULL,
+    17361351345281258834ULL, 1907113641956152700ULL, 16439233413531427752ULL,
+    5893322296986588932ULL, 14000206906171746627ULL, 14979266987545792900ULL,
+    6926291766898221120ULL, 7162023296083360752ULL, 14762747553625382529ULL,
+    12610831658612406849ULL, 10462926899548715515ULL, 4794017723140405312ULL,
+    5234438200490163319ULL, 8019519110339576320ULL, 7194604241290530100ULL, 12626770134810813246ULL,
+    10793074474236419890ULL, 11323224347913978783ULL, 16831128015895380245ULL,
+    18323094195124693378ULL, 2361097165281567692ULL, 15755578675014279498ULL,
+    14289876470325854580ULL, 12856787656093616839ULL, 3578928531243900594ULL,
+    3847532758790503699ULL, 8377953190224748743ULL, 3314546646092744596ULL, 800810188859334358ULL,
+    4626344124229343596ULL, 6620381605850876621ULL, 11422073570955989527ULL,
+    12676813626484814469ULL, 16725029886764122240ULL, 16648497372773830008ULL,
+    9135702594931291048ULL, 16080949688826680333ULL, 11528096561346602947ULL,
+    2632498067099740984ULL, 11583842699108800714ULL, 8378404864573610526ULL, 1076560261627788534ULL,
+    13836015994325032828ULL, 11234295937817067909ULL, 5893659808396722708ULL,
+    11277421142886984364ULL, 8968549037166726491ULL, 14841374331394032822ULL,
+    9967344773947889341ULL, 8799244393578496085ULL, 5094686877301601410ULL, 8780316747074726862ULL,
+    9119697306829835718ULL, 15381243327921855368ULL, 2686250164449435196ULL,
+    16466917280442198358ULL, 13791704489163125216ULL, 16955859337117924272ULL,
+    17112836394923783642ULL, 4639176427338618063ULL, 16770029310141094964ULL,
+    11049953922966416185ULL, 12012669590884098968ULL, 4859326885929417214ULL, 896380084392586061ULL,
+    7153028362977034008ULL, 10540021163316263301ULL, 9318277998512936585ULL,
+    18344496977694796523ULL, 11374737400567645494ULL, 17158800051138212954ULL,
+    18343197867863253153ULL, 18204799297967861226ULL, 15798973531606348828ULL,
+    9870158263408310459ULL, 17578869832774612627ULL, 8395748875822696932ULL,
+    15310679007370670872ULL, 11205576736030808860ULL, 10123429210002838967ULL,
+    5910544144088393959ULL, 14016615653353687369ULL, 11191676704772957822ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_g_pow2_192_table_w4[192U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)7870395003430845958U,
-    (uint64_t)18001862936410067720U, (uint64_t)8006461232116967215U, (uint64_t)5921313779532424762U,
-    (uint64_t)10702113371959864307U, (uint64_t)8070517410642379879U, (uint64_t)7139806720777708306U,
-    (uint64_t)8253938546650739833U, (uint64_t)17490482834545705718U, (uint64_t)1065249776797037500U,
-    (uint64_t)5018258455937968775U, (uint64_t)14100621120178668337U, (uint64_t)8392845221328116213U,
-    (uint64_t)14630296398338540788U, (uint64_t)4268947906723414372U, (uint64_t)9231207002243517909U,
-    (uint64_t)14261219637616504262U, (uint64_t)7786881626982345356U,
-    (uint64_t)11412720751765882139U, (uint64_t)14119585051365330009U,
-    (uint64_t)15281626286521302128U, (uint64_t)6350171933454266732U,
-    (uint64_t)16559468304937127866U, (uint64_t)13200760478271693417U,
-    (uint64_t)6733381546280350776U, (uint64_t)3801404890075189193U, (uint64_t)2741036364686993903U,
-    (uint64_t)3218612940540174008U, (uint64_t)10894914335165419505U,
-    (uint64_t)11862941430149998362U, (uint64_t)4223151729402839584U, (uint64_t)2913215088487087887U,
-    (uint64_t)14562168920104952953U, (uint64_t)2170089393468287453U,
-    (uint64_t)10520900655016579352U, (uint64_t)7040362608949989273U, (uint64_t)8376510559381705307U,
-    (uint64_t)9142237200448131532U, (uint64_t)5696859948123854080U, (uint64_t)925422306716081180U,
-    (uint64_t)11155545953469186421U, (uint64_t)1888208646862572812U,
-    (uint64_t)11151095998248845721U, (uint64_t)15793503271680275267U,
-    (uint64_t)7729877044494854851U, (uint64_t)6235134673193032913U, (uint64_t)7364280682182401564U,
-    (uint64_t)5479679373325519985U, (uint64_t)17966037684582301763U,
-    (uint64_t)14140891609330279185U, (uint64_t)5814744449740463867U, (uint64_t)5652588426712591652U,
-    (uint64_t)774745682988690912U, (uint64_t)13228255573220500373U, (uint64_t)11949122068786859397U,
-    (uint64_t)8021166392900770376U, (uint64_t)7994323710948720063U, (uint64_t)9924618472877849977U,
-    (uint64_t)17618517523141194266U, (uint64_t)2750424097794401714U,
-    (uint64_t)15481749570715253207U, (uint64_t)14646964509921760497U,
-    (uint64_t)1037442848094301355U, (uint64_t)6295995947389299132U, (uint64_t)16915049722317579514U,
-    (uint64_t)10493877400992990313U, (uint64_t)18391008753060553521U, (uint64_t)483942209623707598U,
-    (uint64_t)2017775662838016613U, (uint64_t)5933251998459363553U, (uint64_t)11789135019970707407U,
-    (uint64_t)5484123723153268336U, (uint64_t)13246954648848484954U, (uint64_t)4774374393926023505U,
-    (uint64_t)14863995618704457336U, (uint64_t)13220153167104973625U,
-    (uint64_t)5988445485312390826U, (uint64_t)17580359464028944682U, (uint64_t)7297100131969874771U,
-    (uint64_t)379931507867989375U, (uint64_t)10927113096513421444U, (uint64_t)17688881974428340857U,
-    (uint64_t)4259872578781463333U, (uint64_t)8573076295966784472U, (uint64_t)16389829450727275032U,
-    (uint64_t)1667243868963568259U, (uint64_t)17730726848925960919U,
-    (uint64_t)11408899874569778008U, (uint64_t)3576527582023272268U,
-    (uint64_t)16492920640224231656U, (uint64_t)7906130545972460130U,
-    (uint64_t)13878604278207681266U, (uint64_t)41446695125652041U, (uint64_t)8891615271337333503U,
-    (uint64_t)2594537723613594470U, (uint64_t)7699579176995770924U, (uint64_t)147458463055730655U,
-    (uint64_t)12120406862739088406U, (uint64_t)12044892493010567063U,
-    (uint64_t)8554076749615475136U, (uint64_t)1005097692260929999U, (uint64_t)2687202654471188715U,
-    (uint64_t)9457588752176879209U, (uint64_t)17472884880062444019U, (uint64_t)9792097892056020166U,
-    (uint64_t)2525246678512797150U, (uint64_t)15958903035313115662U,
-    (uint64_t)11336038170342247032U, (uint64_t)11560342382835141123U,
-    (uint64_t)6212009033479929024U, (uint64_t)8214308203775021229U, (uint64_t)8475469210070503698U,
-    (uint64_t)13287024123485719563U, (uint64_t)12956951963817520723U,
-    (uint64_t)10693035819908470465U, (uint64_t)11375478788224786725U,
-    (uint64_t)16934625208487120398U, (uint64_t)10094585729115874495U,
-    (uint64_t)2763884524395905776U, (uint64_t)13535890148969964883U,
-    (uint64_t)13514657411765064358U, (uint64_t)9903074440788027562U,
-    (uint64_t)17324720726421199990U, (uint64_t)2273931039117368789U, (uint64_t)3442641041506157854U,
-    (uint64_t)1119853641236409612U, (uint64_t)12037070344296077989U, (uint64_t)581736433335671746U,
-    (uint64_t)6019150647054369174U, (uint64_t)14864096138068789375U, (uint64_t)6652995210998318662U,
-    (uint64_t)12773883697029175304U, (uint64_t)12751275631451845119U,
-    (uint64_t)11449095003038250478U, (uint64_t)1025805267334366480U, (uint64_t)2764432500300815015U,
-    (uint64_t)18274564429002844381U, (uint64_t)10445634195592600351U,
-    (uint64_t)11814099592837202735U, (uint64_t)5006796893679120289U, (uint64_t)6908397253997261914U,
-    (uint64_t)13266696965302879279U, (uint64_t)7768715053015037430U, (uint64_t)3569923738654785686U,
-    (uint64_t)5844853453464857549U, (uint64_t)1837340805629559110U, (uint64_t)1034657624388283114U,
-    (uint64_t)711244516069456460U, (uint64_t)12519286026957934814U, (uint64_t)2613464944620837619U,
-    (uint64_t)10003023321338286213U, (uint64_t)7291332092642881376U, (uint64_t)9832199564117004897U,
-    (uint64_t)3280736694860799890U, (uint64_t)6416452202849179874U, (uint64_t)7326961381798642069U,
-    (uint64_t)8435688798040635029U, (uint64_t)16630141263910982958U,
-    (uint64_t)17222635514422533318U, (uint64_t)9482787389178881499U, (uint64_t)836561194658263905U,
-    (uint64_t)3405319043337616649U, (uint64_t)2786146577568026518U, (uint64_t)7625483685691626321U,
-    (uint64_t)6728084875304656716U, (uint64_t)1140997959232544268U, (uint64_t)12847384827606303792U,
-    (uint64_t)1719121337754572070U, (uint64_t)12863589482936438532U, (uint64_t)3880712899640530862U,
-    (uint64_t)2748456882813671564U, (uint64_t)4775988900044623019U, (uint64_t)8937847374382191162U,
-    (uint64_t)3767367347172252295U, (uint64_t)13468672401049388646U,
-    (uint64_t)14359032216842397576U, (uint64_t)2002555958685443975U,
-    (uint64_t)16488678606651526810U, (uint64_t)11826135409597474760U,
-    (uint64_t)15296495673182508601U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 7870395003430845958ULL, 18001862936410067720ULL, 8006461232116967215ULL,
+    5921313779532424762ULL, 10702113371959864307ULL, 8070517410642379879ULL, 7139806720777708306ULL,
+    8253938546650739833ULL, 17490482834545705718ULL, 1065249776797037500ULL, 5018258455937968775ULL,
+    14100621120178668337ULL, 8392845221328116213ULL, 14630296398338540788ULL,
+    4268947906723414372ULL, 9231207002243517909ULL, 14261219637616504262ULL, 7786881626982345356ULL,
+    11412720751765882139ULL, 14119585051365330009ULL, 15281626286521302128ULL,
+    6350171933454266732ULL, 16559468304937127866ULL, 13200760478271693417ULL,
+    6733381546280350776ULL, 3801404890075189193ULL, 2741036364686993903ULL, 3218612940540174008ULL,
+    10894914335165419505ULL, 11862941430149998362ULL, 4223151729402839584ULL,
+    2913215088487087887ULL, 14562168920104952953ULL, 2170089393468287453ULL,
+    10520900655016579352ULL, 7040362608949989273ULL, 8376510559381705307ULL, 9142237200448131532ULL,
+    5696859948123854080ULL, 925422306716081180ULL, 11155545953469186421ULL, 1888208646862572812ULL,
+    11151095998248845721ULL, 15793503271680275267ULL, 7729877044494854851ULL,
+    6235134673193032913ULL, 7364280682182401564ULL, 5479679373325519985ULL, 17966037684582301763ULL,
+    14140891609330279185ULL, 5814744449740463867ULL, 5652588426712591652ULL, 774745682988690912ULL,
+    13228255573220500373ULL, 11949122068786859397ULL, 8021166392900770376ULL,
+    7994323710948720063ULL, 9924618472877849977ULL, 17618517523141194266ULL, 2750424097794401714ULL,
+    15481749570715253207ULL, 14646964509921760497ULL, 1037442848094301355ULL,
+    6295995947389299132ULL, 16915049722317579514ULL, 10493877400992990313ULL,
+    18391008753060553521ULL, 483942209623707598ULL, 2017775662838016613ULL, 5933251998459363553ULL,
+    11789135019970707407ULL, 5484123723153268336ULL, 13246954648848484954ULL,
+    4774374393926023505ULL, 14863995618704457336ULL, 13220153167104973625ULL,
+    5988445485312390826ULL, 17580359464028944682ULL, 7297100131969874771ULL, 379931507867989375ULL,
+    10927113096513421444ULL, 17688881974428340857ULL, 4259872578781463333ULL,
+    8573076295966784472ULL, 16389829450727275032ULL, 1667243868963568259ULL,
+    17730726848925960919ULL, 11408899874569778008ULL, 3576527582023272268ULL,
+    16492920640224231656ULL, 7906130545972460130ULL, 13878604278207681266ULL, 41446695125652041ULL,
+    8891615271337333503ULL, 2594537723613594470ULL, 7699579176995770924ULL, 147458463055730655ULL,
+    12120406862739088406ULL, 12044892493010567063ULL, 8554076749615475136ULL,
+    1005097692260929999ULL, 2687202654471188715ULL, 9457588752176879209ULL, 17472884880062444019ULL,
+    9792097892056020166ULL, 2525246678512797150ULL, 15958903035313115662ULL,
+    11336038170342247032ULL, 11560342382835141123ULL, 6212009033479929024ULL,
+    8214308203775021229ULL, 8475469210070503698ULL, 13287024123485719563ULL,
+    12956951963817520723ULL, 10693035819908470465ULL, 11375478788224786725ULL,
+    16934625208487120398ULL, 10094585729115874495ULL, 2763884524395905776ULL,
+    13535890148969964883ULL, 13514657411765064358ULL, 9903074440788027562ULL,
+    17324720726421199990ULL, 2273931039117368789ULL, 3442641041506157854ULL, 1119853641236409612ULL,
+    12037070344296077989ULL, 581736433335671746ULL, 6019150647054369174ULL, 14864096138068789375ULL,
+    6652995210998318662ULL, 12773883697029175304ULL, 12751275631451845119ULL,
+    11449095003038250478ULL, 1025805267334366480ULL, 2764432500300815015ULL,
+    18274564429002844381ULL, 10445634195592600351ULL, 11814099592837202735ULL,
+    5006796893679120289ULL, 6908397253997261914ULL, 13266696965302879279ULL, 7768715053015037430ULL,
+    3569923738654785686ULL, 5844853453464857549ULL, 1837340805629559110ULL, 1034657624388283114ULL,
+    711244516069456460ULL, 12519286026957934814ULL, 2613464944620837619ULL, 10003023321338286213ULL,
+    7291332092642881376ULL, 9832199564117004897ULL, 3280736694860799890ULL, 6416452202849179874ULL,
+    7326961381798642069ULL, 8435688798040635029ULL, 16630141263910982958ULL,
+    17222635514422533318ULL, 9482787389178881499ULL, 836561194658263905ULL, 3405319043337616649ULL,
+    2786146577568026518ULL, 7625483685691626321ULL, 6728084875304656716ULL, 1140997959232544268ULL,
+    12847384827606303792ULL, 1719121337754572070ULL, 12863589482936438532ULL,
+    3880712899640530862ULL, 2748456882813671564ULL, 4775988900044623019ULL, 8937847374382191162ULL,
+    3767367347172252295ULL, 13468672401049388646ULL, 14359032216842397576ULL,
+    2002555958685443975ULL, 16488678606651526810ULL, 11826135409597474760ULL,
+    15296495673182508601ULL
   };
 
 static const
 uint64_t
 Hacl_P256_PrecompTable_precomp_basepoint_table_w5[384U] =
   {
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)0U, (uint64_t)8784043285714375740U,
-    (uint64_t)8483257759279461889U, (uint64_t)8789745728267363600U, (uint64_t)1770019616739251654U,
-    (uint64_t)15992936863339206154U, (uint64_t)10037038012062884956U,
-    (uint64_t)15197544864945402661U, (uint64_t)9615747158586711429U, (uint64_t)1U,
-    (uint64_t)18446744069414584320U, (uint64_t)18446744073709551615U, (uint64_t)4294967294U,
-    (uint64_t)10634854829044225757U, (uint64_t)351552716085025155U, (uint64_t)10645315080955407736U,
-    (uint64_t)3609262091244858135U, (uint64_t)15760741698986874125U,
-    (uint64_t)14936374388219697827U, (uint64_t)15751360096993017895U,
-    (uint64_t)18012233706239762398U, (uint64_t)1993877568177495041U,
-    (uint64_t)10345888787846536528U, (uint64_t)7746511691117935375U,
-    (uint64_t)14517043990409914413U, (uint64_t)14122549297570634151U,
-    (uint64_t)16934610359517083771U, (uint64_t)5724511325497097418U, (uint64_t)8983432969107448705U,
-    (uint64_t)2687429970334080245U, (uint64_t)16525396802810050288U, (uint64_t)7602596488871585854U,
-    (uint64_t)4813919589149203084U, (uint64_t)7680395813780804519U, (uint64_t)6687709583048023590U,
-    (uint64_t)18086445169104142027U, (uint64_t)9637814708330203929U,
-    (uint64_t)14785108459960679090U, (uint64_t)3838023279095023581U, (uint64_t)3555615526157830307U,
-    (uint64_t)5177066488380472871U, (uint64_t)18218186719108038403U,
-    (uint64_t)16281556341699656105U, (uint64_t)1524227924561461191U, (uint64_t)4148060517641909597U,
-    (uint64_t)2858290374115363433U, (uint64_t)8942772026334130620U, (uint64_t)3034451298319885113U,
-    (uint64_t)8447866036736640940U, (uint64_t)11204933433076256578U,
-    (uint64_t)18333595740249588297U, (uint64_t)8259597024804538246U, (uint64_t)9539734295777539786U,
-    (uint64_t)9797290423046626413U, (uint64_t)5777303437849646537U, (uint64_t)8739356909899132020U,
-    (uint64_t)14815960973766782158U, (uint64_t)15286581798204509801U,
-    (uint64_t)17597362577777019682U, (uint64_t)13259283710820519742U,
-    (uint64_t)10501322996899164670U, (uint64_t)1221138904338319642U,
-    (uint64_t)14586685489551951885U, (uint64_t)895326705426031212U, (uint64_t)14398171728560617847U,
-    (uint64_t)9592550823745097391U, (uint64_t)17240998489162206026U, (uint64_t)8085479283308189196U,
-    (uint64_t)14844657737893882826U, (uint64_t)15923425394150618234U,
-    (uint64_t)2997808084773249525U, (uint64_t)494323555453660587U, (uint64_t)1215695327517794764U,
-    (uint64_t)9476207381098391690U, (uint64_t)7480789678419122995U, (uint64_t)15212230329321082489U,
-    (uint64_t)436189395349576388U, (uint64_t)17377474396456660834U, (uint64_t)15237013929655017939U,
-    (uint64_t)11444428846883781676U, (uint64_t)5112749694521428575U, (uint64_t)950829367509872073U,
-    (uint64_t)17665036182057559519U, (uint64_t)17205133339690002313U,
-    (uint64_t)16233765170251334549U, (uint64_t)10122775683257972591U,
-    (uint64_t)3352514236455632420U, (uint64_t)9143148522359954691U, (uint64_t)601191684005658860U,
-    (uint64_t)13398772186646349998U, (uint64_t)15512696600132928431U,
-    (uint64_t)9128416073728948653U, (uint64_t)11233051033546138578U, (uint64_t)6769345682610122833U,
-    (uint64_t)10823233224575054288U, (uint64_t)9997725227559980175U, (uint64_t)6733425642852897415U,
-    (uint64_t)16302206918151466066U, (uint64_t)1669330822143265921U, (uint64_t)2661645605036546002U,
-    (uint64_t)17182558479745802165U, (uint64_t)1165082692376932040U, (uint64_t)9470595929011488359U,
-    (uint64_t)6142147329285324932U, (uint64_t)4829075085998111287U, (uint64_t)10231370681107338930U,
-    (uint64_t)9591876895322495239U, (uint64_t)10316468561384076618U,
-    (uint64_t)11592503647238064235U, (uint64_t)13395813606055179632U, (uint64_t)511127033980815508U,
-    (uint64_t)12434976573147649880U, (uint64_t)3425094795384359127U, (uint64_t)6816971736303023445U,
-    (uint64_t)15444670609021139344U, (uint64_t)9464349818322082360U,
-    (uint64_t)16178216413042376883U, (uint64_t)9595540370774317348U, (uint64_t)7229365182662875710U,
-    (uint64_t)4601177649460012843U, (uint64_t)5455046447382487090U, (uint64_t)10854066421606187521U,
-    (uint64_t)15913416821879788071U, (uint64_t)2297365362023460173U, (uint64_t)2603252216454941350U,
-    (uint64_t)6768791943870490934U, (uint64_t)15705936687122754810U, (uint64_t)9537096567546600694U,
-    (uint64_t)17580538144855035062U, (uint64_t)4496542856965746638U, (uint64_t)8444341625922124942U,
-    (uint64_t)12191263903636183168U, (uint64_t)17427332907535974165U,
-    (uint64_t)14307569739254103736U, (uint64_t)13900598742063266169U,
-    (uint64_t)7176996424355977650U, (uint64_t)5709008170379717479U, (uint64_t)14471312052264549092U,
-    (uint64_t)1464519909491759867U, (uint64_t)3328154641049602121U, (uint64_t)13020349337171136774U,
-    (uint64_t)2772166279972051938U, (uint64_t)10854476939425975292U, (uint64_t)1967189930534630940U,
-    (uint64_t)2802919076529341959U, (uint64_t)14792226094833519208U,
-    (uint64_t)14675640928566522177U, (uint64_t)14838974364643800837U,
-    (uint64_t)17631460696099549980U, (uint64_t)17434186275364935469U,
-    (uint64_t)2665648200587705473U, (uint64_t)13202122464492564051U, (uint64_t)7576287350918073341U,
-    (uint64_t)2272206013910186424U, (uint64_t)14558761641743937843U, (uint64_t)5675729149929979729U,
-    (uint64_t)9043135187561613166U, (uint64_t)11750149293830589225U, (uint64_t)740555197954307911U,
-    (uint64_t)9871738005087190699U, (uint64_t)17178667634283502053U,
-    (uint64_t)18046255991533013265U, (uint64_t)4458222096988430430U, (uint64_t)8452427758526311627U,
-    (uint64_t)13825286929656615266U, (uint64_t)13956286357198391218U,
-    (uint64_t)15875692916799995079U, (uint64_t)10634895319157013920U,
-    (uint64_t)13230116118036304207U, (uint64_t)8795317393614625606U, (uint64_t)7001710806858862020U,
-    (uint64_t)7949746088586183478U, (uint64_t)14677556044923602317U,
-    (uint64_t)11184023437485843904U, (uint64_t)11215864722023085094U,
-    (uint64_t)6444464081471519014U, (uint64_t)1706241174022415217U, (uint64_t)8243975633057550613U,
-    (uint64_t)15502902453836085864U, (uint64_t)3799182188594003953U, (uint64_t)3538840175098724094U,
-    (uint64_t)13240193491554624643U, (uint64_t)12365034249541329920U,
-    (uint64_t)2924326828590977357U, (uint64_t)5687195797140589099U, (uint64_t)16880427227292834531U,
-    (uint64_t)9691471435758991112U, (uint64_t)16642385273732487288U,
-    (uint64_t)12173806747523009914U, (uint64_t)13142722756877876849U,
-    (uint64_t)8370377548305121979U, (uint64_t)17988526053752025426U, (uint64_t)4818750752684100334U,
-    (uint64_t)5669241919350361655U, (uint64_t)4964810303238518540U, (uint64_t)16709712747671533191U,
-    (uint64_t)4461414404267448242U, (uint64_t)3971798785139504238U, (uint64_t)6276818948740422136U,
-    (uint64_t)1426735892164275762U, (uint64_t)7943622674892418919U, (uint64_t)9864274225563929680U,
-    (uint64_t)57815533745003233U, (uint64_t)10893588105168960233U, (uint64_t)15739162732907069535U,
-    (uint64_t)3923866849462073470U, (uint64_t)12279826158399226875U, (uint64_t)1533015761334846582U,
-    (uint64_t)15860156818568437510U, (uint64_t)8252625373831297988U, (uint64_t)9666953804812706358U,
-    (uint64_t)8767785238646914634U, (uint64_t)14382179044941403551U,
-    (uint64_t)10401039907264254245U, (uint64_t)8584860003763157350U, (uint64_t)3120462679504470266U,
-    (uint64_t)8670255778748340069U, (uint64_t)5313789577940369984U, (uint64_t)16977072364454789224U,
-    (uint64_t)12199578693972188324U, (uint64_t)18211098771672599237U,
-    (uint64_t)12868831556008795030U, (uint64_t)5310155061431048194U,
-    (uint64_t)18114153238435112606U, (uint64_t)14482365809278304512U,
-    (uint64_t)12520721662723001511U, (uint64_t)405943624021143002U, (uint64_t)8146944101507657423U,
-    (uint64_t)181739317780393495U, (uint64_t)81743892273670099U, (uint64_t)14759561962550473930U,
-    (uint64_t)4592623849546992939U, (uint64_t)6916440441743449719U, (uint64_t)1304610503530809833U,
-    (uint64_t)5464930909232486441U, (uint64_t)15414883617496224671U, (uint64_t)8129283345256790U,
-    (uint64_t)18294252198413739489U, (uint64_t)17394115281884857288U,
-    (uint64_t)7808348415224731235U, (uint64_t)13195566655747230608U, (uint64_t)8568194219353949094U,
-    (uint64_t)15329813048672122440U, (uint64_t)9604275495885785744U, (uint64_t)1577712551205219835U,
-    (uint64_t)15964209008022052790U, (uint64_t)15087297920782098160U,
-    (uint64_t)3946031512438511898U, (uint64_t)10050061168984440631U,
-    (uint64_t)11382452014533138316U, (uint64_t)6313670788911952792U,
-    (uint64_t)12015989229696164014U, (uint64_t)5946702628076168852U, (uint64_t)5219995658774362841U,
-    (uint64_t)12230141881068377972U, (uint64_t)12361195202673441956U,
-    (uint64_t)4732862275653856711U, (uint64_t)17221430380805252370U,
-    (uint64_t)15397525953897375810U, (uint64_t)16557437297239563045U,
-    (uint64_t)10101683801868971351U, (uint64_t)1402611372245592868U, (uint64_t)1931806383735563658U,
-    (uint64_t)10991705207471512479U, (uint64_t)861333583207471392U, (uint64_t)15207766844626322355U,
-    (uint64_t)9224628129811432393U, (uint64_t)3497069567089055613U, (uint64_t)11956632757898590316U,
-    (uint64_t)8733729372586312960U, (uint64_t)18091521051714930927U, (uint64_t)77582787724373283U,
-    (uint64_t)9922437373519669237U, (uint64_t)3079321456325704615U, (uint64_t)12171198408512478457U,
-    (uint64_t)17179130884012147596U, (uint64_t)6839115479620367181U, (uint64_t)4421032569964105406U,
-    (uint64_t)10353331468657256053U, (uint64_t)17400988720335968824U,
-    (uint64_t)17138855889417480540U, (uint64_t)4507980080381370611U,
-    (uint64_t)10703175719793781886U, (uint64_t)12598516658725890426U,
-    (uint64_t)8353463412173898932U, (uint64_t)17703029389228422404U, (uint64_t)9313111267107226233U,
-    (uint64_t)5441322942995154196U, (uint64_t)8952817660034465484U, (uint64_t)17571113341183703118U,
-    (uint64_t)7375087953801067019U, (uint64_t)13381466302076453648U, (uint64_t)3218165271423914596U,
-    (uint64_t)16956372157249382685U, (uint64_t)509080090049418841U, (uint64_t)13374233893294084913U,
-    (uint64_t)2988537624204297086U, (uint64_t)4979195832939384620U, (uint64_t)3803931594068976394U,
-    (uint64_t)10731535883829627646U, (uint64_t)12954845047607194278U,
-    (uint64_t)10494298062560667399U, (uint64_t)4967351022190213065U,
-    (uint64_t)13391917938145756456U, (uint64_t)951370484866918160U, (uint64_t)13531334179067685307U,
-    (uint64_t)12868421357919390599U, (uint64_t)15918857042998130258U,
-    (uint64_t)17769743831936974016U, (uint64_t)7137921979260368809U,
-    (uint64_t)12461369180685892062U, (uint64_t)827476514081935199U, (uint64_t)15107282134224767230U,
-    (uint64_t)10084765752802805748U, (uint64_t)3303739059392464407U,
-    (uint64_t)17859532612136591428U, (uint64_t)10949414770405040164U,
-    (uint64_t)12838613589371008785U, (uint64_t)5554397169231540728U,
-    (uint64_t)18375114572169624408U, (uint64_t)15649286703242390139U,
-    (uint64_t)2957281557463706877U, (uint64_t)14000350446219393213U,
-    (uint64_t)14355199721749620351U, (uint64_t)2730856240099299695U,
-    (uint64_t)17528131000714705752U, (uint64_t)2537498525883536360U, (uint64_t)6121058967084509393U,
-    (uint64_t)16897667060435514221U, (uint64_t)12367869599571112440U,
-    (uint64_t)3388831797050807508U, (uint64_t)16791449724090982798U, (uint64_t)2673426123453294928U,
-    (uint64_t)11369313542384405846U, (uint64_t)15641960333586432634U,
-    (uint64_t)15080962589658958379U, (uint64_t)7747943772340226569U, (uint64_t)8075023376199159152U,
-    (uint64_t)8485093027378306528U, (uint64_t)13503706844122243648U, (uint64_t)8401961362938086226U,
-    (uint64_t)8125426002124226402U, (uint64_t)9005399361407785203U, (uint64_t)6847968030066906634U,
-    (uint64_t)11934937736309295197U, (uint64_t)5116750888594772351U, (uint64_t)2817039227179245227U,
-    (uint64_t)17724206901239332980U, (uint64_t)4985702708254058578U, (uint64_t)5786345435756642871U,
-    (uint64_t)17772527414940936938U, (uint64_t)1201320251272957006U,
-    (uint64_t)15787430120324348129U, (uint64_t)6305488781359965661U,
-    (uint64_t)12423900845502858433U, (uint64_t)17485949424202277720U,
-    (uint64_t)2062237315546855852U, (uint64_t)10353639467860902375U, (uint64_t)2315398490451287299U,
-    (uint64_t)15394572894814882621U, (uint64_t)232866113801165640U, (uint64_t)7413443736109338926U,
-    (uint64_t)902719806551551191U, (uint64_t)16568853118619045174U, (uint64_t)14202214862428279177U,
-    (uint64_t)11719595395278861192U, (uint64_t)5890053236389907647U, (uint64_t)9996196494965833627U,
-    (uint64_t)12967056942364782577U, (uint64_t)9034128755157395787U,
-    (uint64_t)17898204904710512655U, (uint64_t)8229373445062993977U,
-    (uint64_t)13580036169519833644U
+    0ULL, 0ULL, 0ULL, 0ULL, 1ULL, 18446744069414584320ULL, 18446744073709551615ULL, 4294967294ULL,
+    0ULL, 0ULL, 0ULL, 0ULL, 8784043285714375740ULL, 8483257759279461889ULL, 8789745728267363600ULL,
+    1770019616739251654ULL, 15992936863339206154ULL, 10037038012062884956ULL,
+    15197544864945402661ULL, 9615747158586711429ULL, 1ULL, 18446744069414584320ULL,
+    18446744073709551615ULL, 4294967294ULL, 10634854829044225757ULL, 351552716085025155ULL,
+    10645315080955407736ULL, 3609262091244858135ULL, 15760741698986874125ULL,
+    14936374388219697827ULL, 15751360096993017895ULL, 18012233706239762398ULL,
+    1993877568177495041ULL, 10345888787846536528ULL, 7746511691117935375ULL,
+    14517043990409914413ULL, 14122549297570634151ULL, 16934610359517083771ULL,
+    5724511325497097418ULL, 8983432969107448705ULL, 2687429970334080245ULL, 16525396802810050288ULL,
+    7602596488871585854ULL, 4813919589149203084ULL, 7680395813780804519ULL, 6687709583048023590ULL,
+    18086445169104142027ULL, 9637814708330203929ULL, 14785108459960679090ULL,
+    3838023279095023581ULL, 3555615526157830307ULL, 5177066488380472871ULL, 18218186719108038403ULL,
+    16281556341699656105ULL, 1524227924561461191ULL, 4148060517641909597ULL, 2858290374115363433ULL,
+    8942772026334130620ULL, 3034451298319885113ULL, 8447866036736640940ULL, 11204933433076256578ULL,
+    18333595740249588297ULL, 8259597024804538246ULL, 9539734295777539786ULL, 9797290423046626413ULL,
+    5777303437849646537ULL, 8739356909899132020ULL, 14815960973766782158ULL,
+    15286581798204509801ULL, 17597362577777019682ULL, 13259283710820519742ULL,
+    10501322996899164670ULL, 1221138904338319642ULL, 14586685489551951885ULL, 895326705426031212ULL,
+    14398171728560617847ULL, 9592550823745097391ULL, 17240998489162206026ULL,
+    8085479283308189196ULL, 14844657737893882826ULL, 15923425394150618234ULL,
+    2997808084773249525ULL, 494323555453660587ULL, 1215695327517794764ULL, 9476207381098391690ULL,
+    7480789678419122995ULL, 15212230329321082489ULL, 436189395349576388ULL, 17377474396456660834ULL,
+    15237013929655017939ULL, 11444428846883781676ULL, 5112749694521428575ULL, 950829367509872073ULL,
+    17665036182057559519ULL, 17205133339690002313ULL, 16233765170251334549ULL,
+    10122775683257972591ULL, 3352514236455632420ULL, 9143148522359954691ULL, 601191684005658860ULL,
+    13398772186646349998ULL, 15512696600132928431ULL, 9128416073728948653ULL,
+    11233051033546138578ULL, 6769345682610122833ULL, 10823233224575054288ULL,
+    9997725227559980175ULL, 6733425642852897415ULL, 16302206918151466066ULL, 1669330822143265921ULL,
+    2661645605036546002ULL, 17182558479745802165ULL, 1165082692376932040ULL, 9470595929011488359ULL,
+    6142147329285324932ULL, 4829075085998111287ULL, 10231370681107338930ULL, 9591876895322495239ULL,
+    10316468561384076618ULL, 11592503647238064235ULL, 13395813606055179632ULL,
+    511127033980815508ULL, 12434976573147649880ULL, 3425094795384359127ULL, 6816971736303023445ULL,
+    15444670609021139344ULL, 9464349818322082360ULL, 16178216413042376883ULL,
+    9595540370774317348ULL, 7229365182662875710ULL, 4601177649460012843ULL, 5455046447382487090ULL,
+    10854066421606187521ULL, 15913416821879788071ULL, 2297365362023460173ULL,
+    2603252216454941350ULL, 6768791943870490934ULL, 15705936687122754810ULL, 9537096567546600694ULL,
+    17580538144855035062ULL, 4496542856965746638ULL, 8444341625922124942ULL,
+    12191263903636183168ULL, 17427332907535974165ULL, 14307569739254103736ULL,
+    13900598742063266169ULL, 7176996424355977650ULL, 5709008170379717479ULL,
+    14471312052264549092ULL, 1464519909491759867ULL, 3328154641049602121ULL,
+    13020349337171136774ULL, 2772166279972051938ULL, 10854476939425975292ULL,
+    1967189930534630940ULL, 2802919076529341959ULL, 14792226094833519208ULL,
+    14675640928566522177ULL, 14838974364643800837ULL, 17631460696099549980ULL,
+    17434186275364935469ULL, 2665648200587705473ULL, 13202122464492564051ULL,
+    7576287350918073341ULL, 2272206013910186424ULL, 14558761641743937843ULL, 5675729149929979729ULL,
+    9043135187561613166ULL, 11750149293830589225ULL, 740555197954307911ULL, 9871738005087190699ULL,
+    17178667634283502053ULL, 18046255991533013265ULL, 4458222096988430430ULL,
+    8452427758526311627ULL, 13825286929656615266ULL, 13956286357198391218ULL,
+    15875692916799995079ULL, 10634895319157013920ULL, 13230116118036304207ULL,
+    8795317393614625606ULL, 7001710806858862020ULL, 7949746088586183478ULL, 14677556044923602317ULL,
+    11184023437485843904ULL, 11215864722023085094ULL, 6444464081471519014ULL,
+    1706241174022415217ULL, 8243975633057550613ULL, 15502902453836085864ULL, 3799182188594003953ULL,
+    3538840175098724094ULL, 13240193491554624643ULL, 12365034249541329920ULL,
+    2924326828590977357ULL, 5687195797140589099ULL, 16880427227292834531ULL, 9691471435758991112ULL,
+    16642385273732487288ULL, 12173806747523009914ULL, 13142722756877876849ULL,
+    8370377548305121979ULL, 17988526053752025426ULL, 4818750752684100334ULL, 5669241919350361655ULL,
+    4964810303238518540ULL, 16709712747671533191ULL, 4461414404267448242ULL, 3971798785139504238ULL,
+    6276818948740422136ULL, 1426735892164275762ULL, 7943622674892418919ULL, 9864274225563929680ULL,
+    57815533745003233ULL, 10893588105168960233ULL, 15739162732907069535ULL, 3923866849462073470ULL,
+    12279826158399226875ULL, 1533015761334846582ULL, 15860156818568437510ULL,
+    8252625373831297988ULL, 9666953804812706358ULL, 8767785238646914634ULL, 14382179044941403551ULL,
+    10401039907264254245ULL, 8584860003763157350ULL, 3120462679504470266ULL, 8670255778748340069ULL,
+    5313789577940369984ULL, 16977072364454789224ULL, 12199578693972188324ULL,
+    18211098771672599237ULL, 12868831556008795030ULL, 5310155061431048194ULL,
+    18114153238435112606ULL, 14482365809278304512ULL, 12520721662723001511ULL,
+    405943624021143002ULL, 8146944101507657423ULL, 181739317780393495ULL, 81743892273670099ULL,
+    14759561962550473930ULL, 4592623849546992939ULL, 6916440441743449719ULL, 1304610503530809833ULL,
+    5464930909232486441ULL, 15414883617496224671ULL, 8129283345256790ULL, 18294252198413739489ULL,
+    17394115281884857288ULL, 7808348415224731235ULL, 13195566655747230608ULL,
+    8568194219353949094ULL, 15329813048672122440ULL, 9604275495885785744ULL, 1577712551205219835ULL,
+    15964209008022052790ULL, 15087297920782098160ULL, 3946031512438511898ULL,
+    10050061168984440631ULL, 11382452014533138316ULL, 6313670788911952792ULL,
+    12015989229696164014ULL, 5946702628076168852ULL, 5219995658774362841ULL,
+    12230141881068377972ULL, 12361195202673441956ULL, 4732862275653856711ULL,
+    17221430380805252370ULL, 15397525953897375810ULL, 16557437297239563045ULL,
+    10101683801868971351ULL, 1402611372245592868ULL, 1931806383735563658ULL,
+    10991705207471512479ULL, 861333583207471392ULL, 15207766844626322355ULL, 9224628129811432393ULL,
+    3497069567089055613ULL, 11956632757898590316ULL, 8733729372586312960ULL,
+    18091521051714930927ULL, 77582787724373283ULL, 9922437373519669237ULL, 3079321456325704615ULL,
+    12171198408512478457ULL, 17179130884012147596ULL, 6839115479620367181ULL,
+    4421032569964105406ULL, 10353331468657256053ULL, 17400988720335968824ULL,
+    17138855889417480540ULL, 4507980080381370611ULL, 10703175719793781886ULL,
+    12598516658725890426ULL, 8353463412173898932ULL, 17703029389228422404ULL,
+    9313111267107226233ULL, 5441322942995154196ULL, 8952817660034465484ULL, 17571113341183703118ULL,
+    7375087953801067019ULL, 13381466302076453648ULL, 3218165271423914596ULL,
+    16956372157249382685ULL, 509080090049418841ULL, 13374233893294084913ULL, 2988537624204297086ULL,
+    4979195832939384620ULL, 3803931594068976394ULL, 10731535883829627646ULL,
+    12954845047607194278ULL, 10494298062560667399ULL, 4967351022190213065ULL,
+    13391917938145756456ULL, 951370484866918160ULL, 13531334179067685307ULL,
+    12868421357919390599ULL, 15918857042998130258ULL, 17769743831936974016ULL,
+    7137921979260368809ULL, 12461369180685892062ULL, 827476514081935199ULL, 15107282134224767230ULL,
+    10084765752802805748ULL, 3303739059392464407ULL, 17859532612136591428ULL,
+    10949414770405040164ULL, 12838613589371008785ULL, 5554397169231540728ULL,
+    18375114572169624408ULL, 15649286703242390139ULL, 2957281557463706877ULL,
+    14000350446219393213ULL, 14355199721749620351ULL, 2730856240099299695ULL,
+    17528131000714705752ULL, 2537498525883536360ULL, 6121058967084509393ULL,
+    16897667060435514221ULL, 12367869599571112440ULL, 3388831797050807508ULL,
+    16791449724090982798ULL, 2673426123453294928ULL, 11369313542384405846ULL,
+    15641960333586432634ULL, 15080962589658958379ULL, 7747943772340226569ULL,
+    8075023376199159152ULL, 8485093027378306528ULL, 13503706844122243648ULL, 8401961362938086226ULL,
+    8125426002124226402ULL, 9005399361407785203ULL, 6847968030066906634ULL, 11934937736309295197ULL,
+    5116750888594772351ULL, 2817039227179245227ULL, 17724206901239332980ULL, 4985702708254058578ULL,
+    5786345435756642871ULL, 17772527414940936938ULL, 1201320251272957006ULL,
+    15787430120324348129ULL, 6305488781359965661ULL, 12423900845502858433ULL,
+    17485949424202277720ULL, 2062237315546855852ULL, 10353639467860902375ULL,
+    2315398490451287299ULL, 15394572894814882621ULL, 232866113801165640ULL, 7413443736109338926ULL,
+    902719806551551191ULL, 16568853118619045174ULL, 14202214862428279177ULL,
+    11719595395278861192ULL, 5890053236389907647ULL, 9996196494965833627ULL,
+    12967056942364782577ULL, 9034128755157395787ULL, 17898204904710512655ULL,
+    8229373445062993977ULL, 13580036169519833644ULL
   };
 
 #if defined(__cplusplus)
diff --git a/include/msvc/internal/Hacl_SHA2_Types.h b/include/msvc/internal/Hacl_SHA2_Types.h
index 1e51a0f1..5a1eb668 100644
--- a/include/msvc/internal/Hacl_SHA2_Types.h
+++ b/include/msvc/internal/Hacl_SHA2_Types.h
@@ -35,68 +35,68 @@ extern "C" {
 #include "krml/lowstar_endianness.h"
 #include "krml/internal/target.h"
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_2p_s
+typedef struct Hacl_Hash_SHA2_uint8_2p_s
 {
   uint8_t *fst;
   uint8_t *snd;
 }
-Hacl_Impl_SHA2_Types_uint8_2p;
+Hacl_Hash_SHA2_uint8_2p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_3p_s
+typedef struct Hacl_Hash_SHA2_uint8_3p_s
 {
   uint8_t *fst;
-  Hacl_Impl_SHA2_Types_uint8_2p snd;
+  Hacl_Hash_SHA2_uint8_2p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_3p;
+Hacl_Hash_SHA2_uint8_3p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_4p_s
+typedef struct Hacl_Hash_SHA2_uint8_4p_s
 {
   uint8_t *fst;
-  Hacl_Impl_SHA2_Types_uint8_3p snd;
+  Hacl_Hash_SHA2_uint8_3p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_4p;
+Hacl_Hash_SHA2_uint8_4p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_5p_s
+typedef struct Hacl_Hash_SHA2_uint8_5p_s
 {
   uint8_t *fst;
-  Hacl_Impl_SHA2_Types_uint8_4p snd;
+  Hacl_Hash_SHA2_uint8_4p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_5p;
+Hacl_Hash_SHA2_uint8_5p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_6p_s
+typedef struct Hacl_Hash_SHA2_uint8_6p_s
 {
   uint8_t *fst;
-  Hacl_Impl_SHA2_Types_uint8_5p snd;
+  Hacl_Hash_SHA2_uint8_5p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_6p;
+Hacl_Hash_SHA2_uint8_6p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_7p_s
+typedef struct Hacl_Hash_SHA2_uint8_7p_s
 {
   uint8_t *fst;
-  Hacl_Impl_SHA2_Types_uint8_6p snd;
+  Hacl_Hash_SHA2_uint8_6p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_7p;
+Hacl_Hash_SHA2_uint8_7p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_8p_s
+typedef struct Hacl_Hash_SHA2_uint8_8p_s
 {
   uint8_t *fst;
-  Hacl_Impl_SHA2_Types_uint8_7p snd;
+  Hacl_Hash_SHA2_uint8_7p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_8p;
+Hacl_Hash_SHA2_uint8_8p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_2x4p_s
+typedef struct Hacl_Hash_SHA2_uint8_2x4p_s
 {
-  Hacl_Impl_SHA2_Types_uint8_4p fst;
-  Hacl_Impl_SHA2_Types_uint8_4p snd;
+  Hacl_Hash_SHA2_uint8_4p fst;
+  Hacl_Hash_SHA2_uint8_4p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_2x4p;
+Hacl_Hash_SHA2_uint8_2x4p;
 
-typedef struct Hacl_Impl_SHA2_Types_uint8_2x8p_s
+typedef struct Hacl_Hash_SHA2_uint8_2x8p_s
 {
-  Hacl_Impl_SHA2_Types_uint8_8p fst;
-  Hacl_Impl_SHA2_Types_uint8_8p snd;
+  Hacl_Hash_SHA2_uint8_8p fst;
+  Hacl_Hash_SHA2_uint8_8p snd;
 }
-Hacl_Impl_SHA2_Types_uint8_2x8p;
+Hacl_Hash_SHA2_uint8_2x8p;
 
 #if defined(__cplusplus)
 }
diff --git a/src/EverCrypt_AEAD.c b/src/EverCrypt_AEAD.c
index d3a4ffbe..b0fb4826 100644
--- a/src/EverCrypt_AEAD.c
+++ b/src/EverCrypt_AEAD.c
@@ -46,8 +46,8 @@ The state may be reused as many times as desired.
 */
 bool EverCrypt_AEAD_uu___is_Ek(Spec_Agile_AEAD_alg a, EverCrypt_AEAD_state_s projectee)
 {
-  KRML_HOST_IGNORE(a);
-  KRML_HOST_IGNORE(projectee);
+  KRML_MAYBE_UNUSED_VAR(a);
+  KRML_MAYBE_UNUSED_VAR(projectee);
   return true;
 }
 
@@ -86,11 +86,11 @@ Spec_Agile_AEAD_alg EverCrypt_AEAD_alg_of_state(EverCrypt_AEAD_state_s *s)
 static EverCrypt_Error_error_code
 create_in_chacha20_poly1305(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 {
-  uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+  uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
   EverCrypt_AEAD_state_s
   *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s));
   p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek });
-  memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(ek, k, 32U * sizeof (uint8_t));
   dst[0U] = p;
   return EverCrypt_Error_Success;
 }
@@ -98,8 +98,8 @@ create_in_chacha20_poly1305(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 static EverCrypt_Error_error_code
 create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 {
-  KRML_HOST_IGNORE(dst);
-  KRML_HOST_IGNORE(k);
+  KRML_MAYBE_UNUSED_VAR(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
   #if HACL_CAN_COMPILE_VALE
   bool has_aesni = EverCrypt_AutoConfig2_has_aesni();
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
@@ -108,11 +108,11 @@ create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
   bool has_movbe = EverCrypt_AutoConfig2_has_movbe();
   if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe)
   {
-    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)480U, sizeof (uint8_t));
+    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC(480U, sizeof (uint8_t));
     uint8_t *keys_b = ek;
-    uint8_t *hkeys_b = ek + (uint32_t)176U;
-    KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b));
-    KRML_HOST_IGNORE(aes128_keyhash_init(keys_b, hkeys_b));
+    uint8_t *hkeys_b = ek + 176U;
+    aes128_key_expansion(k, keys_b);
+    aes128_keyhash_init(keys_b, hkeys_b);
     EverCrypt_AEAD_state_s
     *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s));
     p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek });
@@ -128,8 +128,8 @@ create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 static EverCrypt_Error_error_code
 create_in_aes256_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 {
-  KRML_HOST_IGNORE(dst);
-  KRML_HOST_IGNORE(k);
+  KRML_MAYBE_UNUSED_VAR(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
   #if HACL_CAN_COMPILE_VALE
   bool has_aesni = EverCrypt_AutoConfig2_has_aesni();
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
@@ -138,11 +138,11 @@ create_in_aes256_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
   bool has_movbe = EverCrypt_AutoConfig2_has_movbe();
   if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe)
   {
-    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)544U, sizeof (uint8_t));
+    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC(544U, sizeof (uint8_t));
     uint8_t *keys_b = ek;
-    uint8_t *hkeys_b = ek + (uint32_t)240U;
-    KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b));
-    KRML_HOST_IGNORE(aes256_keyhash_init(keys_b, hkeys_b));
+    uint8_t *hkeys_b = ek + 240U;
+    aes256_key_expansion(k, keys_b);
+    aes256_keyhash_init(keys_b, hkeys_b);
     EverCrypt_AEAD_state_s
     *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s));
     p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek });
@@ -208,115 +208,106 @@ encrypt_aes128_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)304U;
+  uint8_t *scratch_b = ek + 304U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+  uint8_t *hkeys_b = ek1 + 176U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *plain_b_ = plain;
   uint8_t *out_b_ = cipher;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    plain + plain_len_,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+  memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+  if (len128x6 / 16ULL >= 18ULL)
   {
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    gcm128_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
-    KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
+    gcm128_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
-  memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
   return EverCrypt_Error_Success;
   #else
   KRML_HOST_EPRINTF("KaRaMeL abort at %s:%d\n%s\n",
@@ -340,115 +331,106 @@ encrypt_aes256_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)368U;
+  uint8_t *scratch_b = ek + 368U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+  uint8_t *hkeys_b = ek1 + 240U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *plain_b_ = plain;
   uint8_t *out_b_ = cipher;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    plain + plain_len_,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+  memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+  if (len128x6 / 16ULL >= 18ULL)
   {
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    gcm256_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
-    KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
+    gcm256_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
-  memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
   return EverCrypt_Error_Success;
   #else
   KRML_HOST_EPRINTF("KaRaMeL abort at %s:%d\n%s\n",
@@ -510,7 +492,7 @@ EverCrypt_AEAD_encrypt(
       }
     case Spec_Cipher_Expansion_Hacl_CHACHA20:
       {
-        if (iv_len != (uint32_t)12U)
+        if (iv_len != 12U)
         {
           return EverCrypt_Error_InvalidIVLength;
         }
@@ -546,124 +528,115 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[480U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-  KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 176U;
+  aes128_key_expansion(k, keys_b0);
+  aes128_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
   }
-  else if (iv_len == (uint32_t)0U)
+  else if (iv_len == 0U)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
   }
   else
   {
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)304U;
+    uint8_t *scratch_b = ek0 + 304U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+    uint8_t *hkeys_b = ek1 + 176U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *plain_b_ = plain;
     uint8_t *out_b_ = cipher;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      plain + plain_len_,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+    memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+    if (len128x6 / 16ULL >= 18ULL)
     {
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      gcm128_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
-      KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
+      gcm128_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
-    memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
     KRML_HOST_IGNORE(EverCrypt_Error_Success);
   }
   return EverCrypt_Error_Success;
@@ -697,124 +670,115 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[544U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-  KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 240U;
+  aes256_key_expansion(k, keys_b0);
+  aes256_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
   }
-  else if (iv_len == (uint32_t)0U)
+  else if (iv_len == 0U)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
   }
   else
   {
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)368U;
+    uint8_t *scratch_b = ek0 + 368U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+    uint8_t *hkeys_b = ek1 + 240U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *plain_b_ = plain;
     uint8_t *out_b_ = cipher;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      plain + plain_len_,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+    memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+    if (len128x6 / 16ULL >= 18ULL)
     {
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      gcm256_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
-      KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
+      gcm256_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
-    memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
     KRML_HOST_IGNORE(EverCrypt_Error_Success);
   }
   return EverCrypt_Error_Success;
@@ -840,15 +804,15 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -859,112 +823,103 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm(
   {
     uint8_t ek[480U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-    KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 176U;
+    aes128_key_expansion(k, keys_b0);
+    aes128_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
     }
-    else if (iv_len == (uint32_t)0U)
+    else if (iv_len == 0U)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
     }
     else
     {
       uint8_t *ek0 = (*s).ek;
-      uint8_t *scratch_b = ek0 + (uint32_t)304U;
+      uint8_t *scratch_b = ek0 + 304U;
       uint8_t *ek1 = ek0;
       uint8_t *keys_b = ek1;
-      uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+      uint8_t *hkeys_b = ek1 + 176U;
       uint8_t tmp_iv[16U] = { 0U };
-      uint32_t len = iv_len / (uint32_t)16U;
-      uint32_t bytes_len = len * (uint32_t)16U;
+      uint32_t len = iv_len / 16U;
+      uint32_t bytes_len = len * 16U;
       uint8_t *iv_b = iv;
-      memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-      KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-          (uint64_t)iv_len,
-          (uint64_t)len,
-          tmp_iv,
-          tmp_iv,
-          hkeys_b));
+      memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+      compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
       uint8_t *inout_b = scratch_b;
-      uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-      uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+      uint8_t *abytes_b = scratch_b + 16U;
+      uint8_t *scratch_b1 = scratch_b + 32U;
+      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
       uint8_t *plain_b_ = plain;
       uint8_t *out_b_ = cipher;
       uint8_t *auth_b_ = ad;
-      memcpy(inout_b,
-        plain + plain_len_,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-      memcpy(abytes_b,
-        ad + auth_len_,
-        (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-      uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-      if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+      memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+      memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+      uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+      if (len128x6 / 16ULL >= 18ULL)
       {
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
         uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128x6_ = len128x6 / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        gcm128_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
       else
       {
-        uint32_t len128x61 = (uint32_t)0U;
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+        uint32_t len128x61 = 0U;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + len128x61;
         uint8_t *out128_b = out_b_ + len128x61;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        uint64_t len128x6_ = (uint64_t)0U;
-        KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        uint64_t len128x6_ = 0ULL;
+        gcm128_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
-      memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+      memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
         inout_b,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+        (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
       KRML_HOST_IGNORE(EverCrypt_Error_Success);
     }
     return EverCrypt_Error_Success;
@@ -988,15 +943,15 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -1007,112 +962,103 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm(
   {
     uint8_t ek[544U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-    KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 240U;
+    aes256_key_expansion(k, keys_b0);
+    aes256_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
     }
-    else if (iv_len == (uint32_t)0U)
+    else if (iv_len == 0U)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
     }
     else
     {
       uint8_t *ek0 = (*s).ek;
-      uint8_t *scratch_b = ek0 + (uint32_t)368U;
+      uint8_t *scratch_b = ek0 + 368U;
       uint8_t *ek1 = ek0;
       uint8_t *keys_b = ek1;
-      uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+      uint8_t *hkeys_b = ek1 + 240U;
       uint8_t tmp_iv[16U] = { 0U };
-      uint32_t len = iv_len / (uint32_t)16U;
-      uint32_t bytes_len = len * (uint32_t)16U;
+      uint32_t len = iv_len / 16U;
+      uint32_t bytes_len = len * 16U;
       uint8_t *iv_b = iv;
-      memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-      KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-          (uint64_t)iv_len,
-          (uint64_t)len,
-          tmp_iv,
-          tmp_iv,
-          hkeys_b));
+      memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+      compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
       uint8_t *inout_b = scratch_b;
-      uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-      uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+      uint8_t *abytes_b = scratch_b + 16U;
+      uint8_t *scratch_b1 = scratch_b + 32U;
+      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
       uint8_t *plain_b_ = plain;
       uint8_t *out_b_ = cipher;
       uint8_t *auth_b_ = ad;
-      memcpy(inout_b,
-        plain + plain_len_,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-      memcpy(abytes_b,
-        ad + auth_len_,
-        (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-      uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-      if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+      memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+      memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+      uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+      if (len128x6 / 16ULL >= 18ULL)
       {
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
         uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128x6_ = len128x6 / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        gcm256_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
       else
       {
-        uint32_t len128x61 = (uint32_t)0U;
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+        uint32_t len128x61 = 0U;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + len128x61;
         uint8_t *out128_b = out_b_ + len128x61;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        uint64_t len128x6_ = (uint64_t)0U;
-        KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        uint64_t len128x6_ = 0ULL;
+        gcm256_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
-      memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+      memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
         inout_b,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+        (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
       KRML_HOST_IGNORE(EverCrypt_Error_Success);
     }
     return EverCrypt_Error_Success;
@@ -1136,10 +1082,10 @@ EverCrypt_AEAD_encrypt_expand_chacha20_poly1305(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(iv_len);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
   uint8_t ek[32U] = { 0U };
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek };
-  memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(ek, k, 32U * sizeof (uint8_t));
   EverCrypt_AEAD_state_s *s = &p;
   uint8_t *ek0 = (*s).ek;
   EverCrypt_Chacha20Poly1305_aead_encrypt(ek0, iv, ad_len, ad, plain_len, plain, cipher, tag);
@@ -1222,66 +1168,57 @@ decrypt_aes128_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)304U;
+  uint8_t *scratch_b = ek + 304U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+  uint8_t *hkeys_b = ek1 + 176U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1305,15 +1242,15 @@ decrypt_aes128_gcm(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1335,11 +1272,11 @@ decrypt_aes128_gcm(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1366,66 +1303,57 @@ decrypt_aes256_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)368U;
+  uint8_t *scratch_b = ek + 368U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+  uint8_t *hkeys_b = ek1 + 240U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1449,15 +1377,15 @@ decrypt_aes256_gcm(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1479,11 +1407,11 @@ decrypt_aes256_gcm(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1514,14 +1442,14 @@ decrypt_chacha20_poly1305(
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len != (uint32_t)12U)
+  if (iv_len != 12U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
   uint32_t
   r = EverCrypt_Chacha20Poly1305_aead_decrypt(ek, iv, ad_len, ad, cipher_len, dst, cipher, tag);
-  if (r == (uint32_t)0U)
+  if (r == 0U)
   {
     return EverCrypt_Error_Success;
   }
@@ -1620,73 +1548,64 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[480U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-  KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 176U;
+  aes128_key_expansion(k, keys_b0);
+  aes128_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek0 = (*s).ek;
-  uint8_t *scratch_b = ek0 + (uint32_t)304U;
+  uint8_t *scratch_b = ek0 + 304U;
   uint8_t *ek1 = ek0;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+  uint8_t *hkeys_b = ek1 + 176U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1710,15 +1629,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1740,11 +1659,11 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1779,73 +1698,64 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[544U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-  KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 240U;
+  aes256_key_expansion(k, keys_b0);
+  aes256_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek0 = (*s).ek;
-  uint8_t *scratch_b = ek0 + (uint32_t)368U;
+  uint8_t *scratch_b = ek0 + 368U;
   uint8_t *ek1 = ek0;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+  uint8_t *hkeys_b = ek1 + 240U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1869,15 +1779,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1899,11 +1809,11 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1930,15 +1840,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -1949,61 +1859,52 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
   {
     uint8_t ek[480U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-    KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 176U;
+    aes128_key_expansion(k, keys_b0);
+    aes128_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       return EverCrypt_Error_InvalidKey;
     }
-    if (iv_len == (uint32_t)0U)
+    if (iv_len == 0U)
     {
       return EverCrypt_Error_InvalidIVLength;
     }
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)304U;
+    uint8_t *scratch_b = ek0 + 304U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+    uint8_t *hkeys_b = ek1 + 176U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *cipher_b_ = cipher;
     uint8_t *out_b_ = dst;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      cipher + cipher_len_,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+    memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
     uint64_t c;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+    if (len128x6 / 16ULL >= 6ULL)
     {
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
       uint64_t
       c0 =
         gcm128_decrypt_opt(auth_b_,
@@ -2027,15 +1928,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
       uint64_t
       c0 =
         gcm128_decrypt_opt(auth_b_,
@@ -2057,11 +1958,11 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
           tag);
       c = c0;
     }
-    memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
     uint64_t r = c;
-    if (r == (uint64_t)0U)
+    if (r == 0ULL)
     {
       return EverCrypt_Error_Success;
     }
@@ -2086,15 +1987,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -2105,61 +2006,52 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
   {
     uint8_t ek[544U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-    KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 240U;
+    aes256_key_expansion(k, keys_b0);
+    aes256_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       return EverCrypt_Error_InvalidKey;
     }
-    if (iv_len == (uint32_t)0U)
+    if (iv_len == 0U)
     {
       return EverCrypt_Error_InvalidIVLength;
     }
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)368U;
+    uint8_t *scratch_b = ek0 + 368U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+    uint8_t *hkeys_b = ek1 + 240U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *cipher_b_ = cipher;
     uint8_t *out_b_ = dst;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      cipher + cipher_len_,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+    memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
     uint64_t c;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+    if (len128x6 / 16ULL >= 6ULL)
     {
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
       uint64_t
       c0 =
         gcm256_decrypt_opt(auth_b_,
@@ -2183,15 +2075,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
       uint64_t
       c0 =
         gcm256_decrypt_opt(auth_b_,
@@ -2213,11 +2105,11 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
           tag);
       c = c0;
     }
-    memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
     uint64_t r = c;
-    if (r == (uint64_t)0U)
+    if (r == 0ULL)
     {
       return EverCrypt_Error_Success;
     }
@@ -2244,7 +2136,7 @@ EverCrypt_AEAD_decrypt_expand_chacha20_poly1305(
 {
   uint8_t ek[32U] = { 0U };
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek };
-  memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(ek, k, 32U * sizeof (uint8_t));
   EverCrypt_AEAD_state_s *s = &p;
   EverCrypt_Error_error_code
   r = decrypt_chacha20_poly1305(s, iv, iv_len, ad, ad_len, cipher, cipher_len, tag, dst);
diff --git a/src/EverCrypt_AutoConfig2.c b/src/EverCrypt_AutoConfig2.c
index b549d020..5a92d995 100644
--- a/src/EverCrypt_AutoConfig2.c
+++ b/src/EverCrypt_AutoConfig2.c
@@ -113,59 +113,59 @@ void EverCrypt_AutoConfig2_recall(void)
 void EverCrypt_AutoConfig2_init(void)
 {
   #if HACL_CAN_COMPILE_VALE
-  if (check_aesni() != (uint64_t)0U)
+  if (check_aesni() != 0ULL)
   {
     cpu_has_aesni[0U] = true;
     cpu_has_pclmulqdq[0U] = true;
   }
-  if (check_sha() != (uint64_t)0U)
+  if (check_sha() != 0ULL)
   {
     cpu_has_shaext[0U] = true;
   }
-  if (check_adx_bmi2() != (uint64_t)0U)
+  if (check_adx_bmi2() != 0ULL)
   {
     cpu_has_bmi2[0U] = true;
     cpu_has_adx[0U] = true;
   }
-  if (check_avx() != (uint64_t)0U)
+  if (check_avx() != 0ULL)
   {
-    if (check_osxsave() != (uint64_t)0U)
+    if (check_osxsave() != 0ULL)
     {
-      if (check_avx_xcr0() != (uint64_t)0U)
+      if (check_avx_xcr0() != 0ULL)
       {
         cpu_has_avx[0U] = true;
       }
     }
   }
-  if (check_avx2() != (uint64_t)0U)
+  if (check_avx2() != 0ULL)
   {
-    if (check_osxsave() != (uint64_t)0U)
+    if (check_osxsave() != 0ULL)
     {
-      if (check_avx_xcr0() != (uint64_t)0U)
+      if (check_avx_xcr0() != 0ULL)
       {
         cpu_has_avx2[0U] = true;
       }
     }
   }
-  if (check_sse() != (uint64_t)0U)
+  if (check_sse() != 0ULL)
   {
     cpu_has_sse[0U] = true;
   }
-  if (check_movbe() != (uint64_t)0U)
+  if (check_movbe() != 0ULL)
   {
     cpu_has_movbe[0U] = true;
   }
-  if (check_rdrand() != (uint64_t)0U)
+  if (check_rdrand() != 0ULL)
   {
     cpu_has_rdrand[0U] = true;
   }
-  if (check_avx512() != (uint64_t)0U)
+  if (check_avx512() != 0ULL)
   {
-    if (check_osxsave() != (uint64_t)0U)
+    if (check_osxsave() != 0ULL)
     {
-      if (check_avx_xcr0() != (uint64_t)0U)
+      if (check_avx_xcr0() != 0ULL)
       {
-        if (check_avx512_xcr0() != (uint64_t)0U)
+        if (check_avx512_xcr0() != 0ULL)
         {
           cpu_has_avx512[0U] = true;
           return;
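Callers are not expected to read the cpu_has_* arrays directly; they query the has_* accessors after init. A minimal sketch, assuming the accessors declared in EverCrypt_AutoConfig2.h (prefer_vec256 is a hypothetical helper name):

#include <stdbool.h>
#include "EverCrypt_AutoConfig2.h"

static bool prefer_vec256(void)
{
  EverCrypt_AutoConfig2_init();          /* runs the CPUID probes shown above */
  return EverCrypt_AutoConfig2_has_avx2();
}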
diff --git a/src/EverCrypt_Chacha20Poly1305.c b/src/EverCrypt_Chacha20Poly1305.c
index 9a110bbf..e762f031 100644
--- a/src/EverCrypt_Chacha20Poly1305.c
+++ b/src/EverCrypt_Chacha20Poly1305.c
@@ -44,22 +44,22 @@ EverCrypt_Chacha20Poly1305_aead_encrypt(
   #if HACL_CAN_COMPILE_VEC256
   if (vec256)
   {
-    KRML_HOST_IGNORE(vec128);
-    Hacl_Chacha20Poly1305_256_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
+    KRML_MAYBE_UNUSED_VAR(vec128);
+    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher, tag, m, mlen, aad, aadlen, k, n);
     return;
   }
   #endif
   #if HACL_CAN_COMPILE_VEC128
   if (vec128)
   {
-    KRML_HOST_IGNORE(vec256);
-    Hacl_Chacha20Poly1305_128_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
+    KRML_MAYBE_UNUSED_VAR(vec256);
+    Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher, tag, m, mlen, aad, aadlen, k, n);
     return;
   }
   #endif
-  KRML_HOST_IGNORE(vec128);
-  KRML_HOST_IGNORE(vec256);
-  Hacl_Chacha20Poly1305_32_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
+  KRML_MAYBE_UNUSED_VAR(vec128);
+  KRML_MAYBE_UNUSED_VAR(vec256);
+  Hacl_AEAD_Chacha20Poly1305_encrypt(cipher, tag, m, mlen, aad, aadlen, k, n);
 }
 
 uint32_t
@@ -79,19 +79,19 @@ EverCrypt_Chacha20Poly1305_aead_decrypt(
   #if HACL_CAN_COMPILE_VEC256
   if (vec256)
   {
-    KRML_HOST_IGNORE(vec128);
-    return Hacl_Chacha20Poly1305_256_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
+    KRML_MAYBE_UNUSED_VAR(vec128);
+    return Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(m, cipher, mlen, aad, aadlen, k, n, tag);
   }
   #endif
   #if HACL_CAN_COMPILE_VEC128
   if (vec128)
   {
-    KRML_HOST_IGNORE(vec256);
-    return Hacl_Chacha20Poly1305_128_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
+    KRML_MAYBE_UNUSED_VAR(vec256);
+    return Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(m, cipher, mlen, aad, aadlen, k, n, tag);
   }
   #endif
-  KRML_HOST_IGNORE(vec128);
-  KRML_HOST_IGNORE(vec256);
-  return Hacl_Chacha20Poly1305_32_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
+  KRML_MAYBE_UNUSED_VAR(vec128);
+  KRML_MAYBE_UNUSED_VAR(vec256);
+  return Hacl_AEAD_Chacha20Poly1305_decrypt(m, cipher, mlen, aad, aadlen, k, n, tag);
 }
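The calls above also show the new argument order of the renamed one-shot API: outputs first (ciphertext, tag), then input, AAD, key, nonce, with decrypt returning 0 only when the tag verifies. A hedged round-trip sketch against the portable fallback; the 32-byte key, 12-byte nonce and 16-byte tag sizes are the usual ChaCha20-Poly1305 parameters, assumed here rather than taken from this hunk:

#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305.h"

/* Hypothetical helper: encrypt msg, then decrypt it again and check the tag. */
static int roundtrip(uint8_t key[32U], uint8_t nonce[12U],
                     uint8_t *aad, uint32_t aad_len,
                     uint8_t *msg, uint32_t msg_len,
                     uint8_t *cipher, uint8_t *decrypted /* msg_len bytes each */)
{
  uint8_t tag[16U];
  Hacl_AEAD_Chacha20Poly1305_encrypt(cipher, tag, msg, msg_len,
                                     aad, aad_len, key, nonce);
  /* decrypt returns 0 when the tag verifies, nonzero otherwise */
  uint32_t rc = Hacl_AEAD_Chacha20Poly1305_decrypt(decrypted, cipher, msg_len,
                                                   aad, aad_len, key, nonce, tag);
  return rc == 0U ? 0 : -1;
}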
 
diff --git a/src/EverCrypt_DRBG.c b/src/EverCrypt_DRBG.c
index 13e517e5..301fe528 100644
--- a/src/EverCrypt_DRBG.c
+++ b/src/EverCrypt_DRBG.c
@@ -28,15 +28,15 @@
 #include "internal/EverCrypt_HMAC.h"
 #include "lib_memzero0.h"
 
-uint32_t EverCrypt_DRBG_reseed_interval = (uint32_t)1024U;
+uint32_t EverCrypt_DRBG_reseed_interval = 1024U;
 
-uint32_t EverCrypt_DRBG_max_output_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_output_length = 65536U;
 
-uint32_t EverCrypt_DRBG_max_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_length = 65536U;
 
-uint32_t EverCrypt_DRBG_max_personalization_string_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_personalization_string_length = 65536U;
 
-uint32_t EverCrypt_DRBG_max_additional_input_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_additional_input_length = 65536U;
 
 uint32_t EverCrypt_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
 {
@@ -44,19 +44,19 @@ uint32_t EverCrypt_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     default:
       {
@@ -92,7 +92,7 @@ EverCrypt_DRBG_uu___is_SHA1_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA1_s)
   {
     return true;
@@ -106,7 +106,7 @@ EverCrypt_DRBG_uu___is_SHA2_256_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA2_256_s)
   {
     return true;
@@ -120,7 +120,7 @@ EverCrypt_DRBG_uu___is_SHA2_384_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA2_384_s)
   {
     return true;
@@ -134,7 +134,7 @@ EverCrypt_DRBG_uu___is_SHA2_512_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA2_512_s)
   {
     return true;
@@ -149,10 +149,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -164,10 +164,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -179,10 +179,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -194,10 +194,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -247,7 +247,7 @@ instantiate_sha1(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t entropy[min_entropy];
@@ -282,45 +282,43 @@ instantiate_sha1(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)20U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)20U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 20U * sizeof (uint8_t));
+  memset(v, 1U, 20U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 21U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 20U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)21U,
+    memcpy(input0 + 21U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[20U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-  EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-  memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[20U] = 0U;
+  EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+  EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+  memcpy(k, k_, 20U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 21U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 20U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)21U,
+      memcpy(input + 21U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[20U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-    EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+    input[20U] = 1U;
+    EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+    EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+    memcpy(k, k_0, 20U * sizeof (uint8_t));
   }
   return true;
 }
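The instantiate, reseed and generate bodies in this file all inline the same HMAC-DRBG update step (SP 800-90A, HMAC_DRBG_Update). A minimal sketch of that step, specialized to SHA-1 with 20-byte K and V; drbg_update_sha1 and its separate new_k scratch buffer are conveniences of the sketch, not part of the patched code:

#include <stdint.h>
#include <string.h>
#include "EverCrypt_HMAC.h"

static void drbg_update_sha1(uint8_t *k, uint8_t *v,
                             const uint8_t *provided, uint32_t provided_len)
{
  uint32_t input_len = 21U + provided_len;
  uint8_t input[input_len];              /* V || round byte || provided data */
  uint8_t new_k[20U];
  memcpy(input, v, 20U);
  input[20U] = 0U;                       /* round 0x00 */
  if (provided_len != 0U)
    memcpy(input + 21U, provided, provided_len);
  /* K = HMAC(K, V || 0x00 || provided); V = HMAC(K, V) */
  EverCrypt_HMAC_compute_sha1(new_k, k, 20U, input, input_len);
  EverCrypt_HMAC_compute_sha1(v, new_k, 20U, v, 20U);
  memcpy(k, new_k, 20U);
  if (provided_len != 0U)                /* second round only with provided data */
  {
    memcpy(input, v, 20U);
    input[20U] = 1U;                     /* round 0x01 */
    EverCrypt_HMAC_compute_sha1(new_k, k, 20U, input, input_len);
    EverCrypt_HMAC_compute_sha1(v, new_k, 20U, v, 20U);
    memcpy(k, new_k, 20U);
  }
}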
@@ -337,7 +335,7 @@ instantiate_sha2_256(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t entropy[min_entropy];
@@ -372,45 +370,43 @@ instantiate_sha2_256(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)32U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)32U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 32U * sizeof (uint8_t));
+  memset(v, 1U, 32U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 33U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 32U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)33U,
+    memcpy(input0 + 33U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[32U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-  memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[32U] = 0U;
+  EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+  memcpy(k, k_, 32U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 33U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 32U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)33U,
+      memcpy(input + 33U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[32U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+    input[32U] = 1U;
+    EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+    memcpy(k, k_0, 32U * sizeof (uint8_t));
   }
   return true;
 }
@@ -427,7 +423,7 @@ instantiate_sha2_384(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t entropy[min_entropy];
@@ -462,45 +458,43 @@ instantiate_sha2_384(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)48U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)48U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 48U * sizeof (uint8_t));
+  memset(v, 1U, 48U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 49U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 48U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)49U,
+    memcpy(input0 + 49U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[48U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-  memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[48U] = 0U;
+  EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+  memcpy(k, k_, 48U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 49U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 48U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)49U,
+      memcpy(input + 49U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[48U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+    input[48U] = 1U;
+    EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+    memcpy(k, k_0, 48U * sizeof (uint8_t));
   }
   return true;
 }
@@ -517,7 +511,7 @@ instantiate_sha2_512(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t entropy[min_entropy];
@@ -552,45 +546,43 @@ instantiate_sha2_512(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)64U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)64U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 64U * sizeof (uint8_t));
+  memset(v, 1U, 64U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 65U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 64U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)65U,
+    memcpy(input0 + 65U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[64U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-  memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[64U] = 0U;
+  EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+  memcpy(k, k_, 64U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 65U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 64U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)65U,
+      memcpy(input + 65U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[64U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+    input[64U] = 1U;
+    EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+    memcpy(k, k_0, 64U * sizeof (uint8_t));
   }
   return true;
 }
@@ -635,42 +627,42 @@ reseed_sha1(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 21U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 20U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)21U,
+    memcpy(input0 + 21U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[20U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-  EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-  memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[20U] = 0U;
+  EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+  EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+  memcpy(k, k_, 20U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)21U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 21U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 20U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)21U,
+      memcpy(input + 21U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[20U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-    EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+    input[20U] = 1U;
+    EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+    EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+    memcpy(k, k_0, 20U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -714,42 +706,42 @@ reseed_sha2_256(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 33U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 32U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)33U,
+    memcpy(input0 + 33U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[32U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-  memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[32U] = 0U;
+  EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+  memcpy(k, k_, 32U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)33U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 33U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 32U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)33U,
+      memcpy(input + 33U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[32U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+    input[32U] = 1U;
+    EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+    memcpy(k, k_0, 32U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -793,42 +785,42 @@ reseed_sha2_384(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 49U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 48U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)49U,
+    memcpy(input0 + 49U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[48U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-  memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[48U] = 0U;
+  EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+  memcpy(k, k_, 48U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)49U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 49U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 48U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)49U,
+      memcpy(input + 49U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[48U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+    input[48U] = 1U;
+    EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+    memcpy(k, k_0, 48U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -872,42 +864,42 @@ reseed_sha2_512(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 65U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 64U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)65U,
+    memcpy(input0 + 65U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[64U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-  memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[64U] = 0U;
+  EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+  memcpy(k, k_, 64U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)65U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 65U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 64U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)65U,
+      memcpy(input + 65U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[64U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+    input[64U] = 1U;
+    EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+    memcpy(k, k_0, 64U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -970,42 +962,42 @@ generate_sha1(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 21U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t input0[input_len];
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 20U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)21U,
+        memcpy(input0 + 21U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[20U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-      EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-      memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[20U] = 0U;
+      EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+      EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+      memcpy(k, k_, 20U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)21U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 21U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t input[input_len0];
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 20U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)21U,
+          memcpy(input + 21U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[20U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-        EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+        input[20U] = 1U;
+        EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+        EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+        memcpy(k, k_0, 20U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1015,16 +1007,16 @@ generate_sha1(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA1_s)
   {
-    x1 = st_s.case_SHA1_s;
+    ite = st_s.case_SHA1_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1040,87 +1032,87 @@ generate_sha1(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)21U + additional_input_len;
+    uint32_t input_len = 21U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t input0[input_len];
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 20U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[20U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-    EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[20U] = 0U;
+    EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+    EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+    memcpy(k, k_, 20U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+      uint32_t input_len0 = 21U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t input[input_len0];
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 20U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[20U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-      EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-      memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+      input[20U] = 1U;
+      EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+      EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+      memcpy(k, k_0, 20U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)20U;
+  uint32_t max = n / 20U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(out + i * (uint32_t)20U, v, (uint32_t)20U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha1(v, k, 20U, v, 20U);
+    memcpy(out + i * 20U, v, 20U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)20U < n)
+  if (max * 20U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)20U;
-    EverCrypt_HMAC_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(block, v, (n - max * (uint32_t)20U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 20U;
+    EverCrypt_HMAC_compute_sha1(v, k, 20U, v, 20U);
+    memcpy(block, v, (n - max * 20U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)21U + additional_input_len;
+  uint32_t input_len = 21U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 20U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[20U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-  EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-  memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[20U] = 0U;
+  EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+  EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+  memcpy(k, k_, 20U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+    uint32_t input_len0 = 21U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 20U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[20U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-    EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+    input[20U] = 1U;
+    EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+    EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+    memcpy(k, k_0, 20U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
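Applications normally reach generate_sha1 and its siblings through the public DRBG API rather than calling them directly. A hedged sketch, assuming the instantiate/generate/uninstantiate prototypes from EverCrypt_DRBG.h (only create_in appears in this patch); example_random_32 is a hypothetical wrapper:

#include <stdbool.h>
#include <stdint.h>
#include "EverCrypt_DRBG.h"

static bool example_random_32(uint8_t out[32U])
{
  uint8_t empty[1U] = { 0U };
  EverCrypt_DRBG_state_s *st = EverCrypt_DRBG_create_in(Spec_Hash_Definitions_SHA2_256);
  bool ok = EverCrypt_DRBG_instantiate(st, (uint8_t *)"example", 7U);
  if (ok)
  {
    ok = EverCrypt_DRBG_generate(out, st, 32U, empty, 0U);
  }
  EverCrypt_DRBG_uninstantiate(st);      /* zeroes and frees the internal K/V/counter */
  return ok;
}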
 
@@ -1183,42 +1175,42 @@ generate_sha2_256(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 33U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t input0[input_len];
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 32U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)33U,
+        memcpy(input0 + 33U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[32U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-      EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-      memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[32U] = 0U;
+      EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+      EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+      memcpy(k, k_, 32U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)33U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 33U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t input[input_len0];
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 32U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)33U,
+          memcpy(input + 33U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[32U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-        EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+        input[32U] = 1U;
+        EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+        EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+        memcpy(k, k_0, 32U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1228,16 +1220,16 @@ generate_sha2_256(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA2_256_s)
   {
-    x1 = st_s.case_SHA2_256_s;
+    ite = st_s.case_SHA2_256_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1253,87 +1245,87 @@ generate_sha2_256(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)33U + additional_input_len;
+    uint32_t input_len = 33U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t input0[input_len];
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 32U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[32U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-    EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[32U] = 0U;
+    EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+    EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+    memcpy(k, k_, 32U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+      uint32_t input_len0 = 33U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t input[input_len0];
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 32U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[32U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-      EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-      memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+      input[32U] = 1U;
+      EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+      EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+      memcpy(k, k_0, 32U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)32U;
+  uint32_t max = n / 32U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(out + i * (uint32_t)32U, v, (uint32_t)32U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+    memcpy(out + i * 32U, v, 32U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)32U < n)
+  if (max * 32U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)32U;
-    EverCrypt_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(block, v, (n - max * (uint32_t)32U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 32U;
+    EverCrypt_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+    memcpy(block, v, (n - max * 32U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)33U + additional_input_len;
+  uint32_t input_len = 33U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 32U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[32U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-  memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[32U] = 0U;
+  EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+  memcpy(k, k_, 32U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+    uint32_t input_len0 = 33U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 32U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[32U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+    input[32U] = 1U;
+    EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+    memcpy(k, k_0, 32U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
 
@@ -1396,42 +1388,42 @@ generate_sha2_384(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 49U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t input0[input_len];
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 48U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)49U,
+        memcpy(input0 + 49U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[48U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-      EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-      memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[48U] = 0U;
+      EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+      EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+      memcpy(k, k_, 48U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)49U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 49U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t input[input_len0];
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 48U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)49U,
+          memcpy(input + 49U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[48U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-        EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+        input[48U] = 1U;
+        EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+        EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+        memcpy(k, k_0, 48U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1441,16 +1433,16 @@ generate_sha2_384(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA2_384_s)
   {
-    x1 = st_s.case_SHA2_384_s;
+    ite = st_s.case_SHA2_384_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1466,87 +1458,87 @@ generate_sha2_384(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)49U + additional_input_len;
+    uint32_t input_len = 49U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t input0[input_len];
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 48U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[48U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-    EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[48U] = 0U;
+    EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+    EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+    memcpy(k, k_, 48U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+      uint32_t input_len0 = 49U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t input[input_len0];
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 48U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[48U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-      EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-      memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+      input[48U] = 1U;
+      EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+      EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+      memcpy(k, k_0, 48U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)48U;
+  uint32_t max = n / 48U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(out + i * (uint32_t)48U, v, (uint32_t)48U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+    memcpy(out + i * 48U, v, 48U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)48U < n)
+  if (max * 48U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)48U;
-    EverCrypt_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(block, v, (n - max * (uint32_t)48U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 48U;
+    EverCrypt_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+    memcpy(block, v, (n - max * 48U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)49U + additional_input_len;
+  uint32_t input_len = 49U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 48U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[48U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-  memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[48U] = 0U;
+  EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+  memcpy(k, k_, 48U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+    uint32_t input_len0 = 49U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 48U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[48U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+    input[48U] = 1U;
+    EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+    memcpy(k, k_0, 48U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
 
@@ -1609,42 +1601,42 @@ generate_sha2_512(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 65U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t input0[input_len];
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 64U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)65U,
+        memcpy(input0 + 65U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[64U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-      EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-      memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[64U] = 0U;
+      EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+      EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+      memcpy(k, k_, 64U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)65U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 65U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t input[input_len0];
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 64U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)65U,
+          memcpy(input + 65U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[64U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-        EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+        input[64U] = 1U;
+        EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+        EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+        memcpy(k, k_0, 64U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1654,16 +1646,16 @@ generate_sha2_512(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA2_512_s)
   {
-    x1 = st_s.case_SHA2_512_s;
+    ite = st_s.case_SHA2_512_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1679,87 +1671,87 @@ generate_sha2_512(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)65U + additional_input_len;
+    uint32_t input_len = 65U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t input0[input_len];
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 64U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[64U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-    EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[64U] = 0U;
+    EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+    EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+    memcpy(k, k_, 64U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+      uint32_t input_len0 = 65U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t input[input_len0];
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 64U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[64U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-      EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-      memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+      input[64U] = 1U;
+      EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+      EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+      memcpy(k, k_0, 64U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)64U;
+  uint32_t max = n / 64U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(out + i * (uint32_t)64U, v, (uint32_t)64U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+    memcpy(out + i * 64U, v, 64U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)64U < n)
+  if (max * 64U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)64U;
-    EverCrypt_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(block, v, (n - max * (uint32_t)64U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 64U;
+    EverCrypt_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+    memcpy(block, v, (n - max * 64U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)65U + additional_input_len;
+  uint32_t input_len = 65U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t input0[input_len];
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 64U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[64U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-  memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[64U] = 0U;
+  EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+  memcpy(k, k_, 64U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+    uint32_t input_len0 = 65U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t input[input_len0];
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 64U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[64U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+    input[64U] = 1U;
+    EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+    memcpy(k, k_0, 64U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
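
/*
 * Context for the generate_sha2_384 / generate_sha2_512 hunks above: both
 * follow the HMAC-DRBG generate loop shape from NIST SP 800-90A — each output
 * block is V = HMAC(K, V), copied into the caller's buffer, with a truncated
 * final block and a trailing (K, V) update.  The sketch below is illustrative
 * only; the `hmac_fn` callback, HASH_LEN and drbg_generate_blocks are
 * assumptions for this note, not the EverCrypt API.
 */
#include <stdint.h>
#include <string.h>

#define HASH_LEN 48u  /* e.g. SHA2-384; 64 for SHA2-512 */

typedef void (*hmac_fn)(uint8_t *tag, const uint8_t *key, uint32_t key_len,
                        const uint8_t *msg, uint32_t msg_len);

/* Fill `out` with `n` bytes of DRBG output; k and v are HASH_LEN bytes.
   V is updated in place, mirroring the generated code above. */
static void drbg_generate_blocks(hmac_fn hmac, uint8_t *k, uint8_t *v,
                                 uint8_t *out, uint32_t n)
{
  uint32_t full = n / HASH_LEN;
  for (uint32_t i = 0u; i < full; i++)
  {
    hmac(v, k, HASH_LEN, v, HASH_LEN);              /* V = HMAC(K, V)        */
    memcpy(out + i * HASH_LEN, v, HASH_LEN);        /* emit one full block   */
  }
  if (full * HASH_LEN < n)
  {
    hmac(v, k, HASH_LEN, v, HASH_LEN);              /* one more block ...    */
    memcpy(out + full * HASH_LEN, v, n - full * HASH_LEN);  /* ... truncated */
  }
}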
 
@@ -1778,9 +1770,9 @@ static void uninstantiate_sha1(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)20U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)20U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 20U, uint8_t);
+  Lib_Memzero0_memzero(v, 20U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
@@ -1802,9 +1794,9 @@ static void uninstantiate_sha2_256(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)32U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 32U, uint8_t);
+  Lib_Memzero0_memzero(v, 32U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
@@ -1826,9 +1818,9 @@ static void uninstantiate_sha2_384(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)48U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)48U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 48U, uint8_t);
+  Lib_Memzero0_memzero(v, 48U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
@@ -1850,9 +1842,9 @@ static void uninstantiate_sha2_512(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)64U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)64U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 64U, uint8_t);
+  Lib_Memzero0_memzero(v, 64U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
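
/*
 * The uninstantiate_* hunks above all share one teardown pattern: wipe the
 * key K, the value V and the reseed counter before freeing them.  Minimal
 * illustrative sketch; `secure_wipe` is a stand-in for Lib_Memzero0_memzero
 * and `drbg_teardown` is an assumption for this note, not a library function.
 */
#include <stdint.h>
#include <stdlib.h>
#include <stddef.h>

static void secure_wipe(volatile uint8_t *p, size_t len)
{
  /* writes through a volatile pointer are commonly used to keep the
     compiler from eliminating the "dead" stores */
  for (size_t i = 0u; i < len; i++) { p[i] = 0u; }
}

static void drbg_teardown(uint8_t *k, uint8_t *v, uint32_t *ctr, size_t hash_len)
{
  secure_wipe(k, hash_len);
  secure_wipe(v, hash_len);
  *ctr = 0u;
  free(k);
  free(v);
  free(ctr);
}
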
diff --git a/src/EverCrypt_HKDF.c b/src/EverCrypt_HKDF.c
index 796a8424..773f86b8 100644
--- a/src/EverCrypt_HKDF.c
+++ b/src/EverCrypt_HKDF.c
@@ -37,39 +37,39 @@ expand_sha1(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)20U;
+  uint32_t tlen = 20U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -92,39 +92,39 @@ expand_sha2_256(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -147,39 +147,39 @@ expand_sha2_384(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)48U;
+  uint32_t tlen = 48U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -202,39 +202,39 @@ expand_sha2_512(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -257,39 +257,39 @@ expand_blake2s(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -312,39 +312,39 @@ expand_blake2b(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
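
/*
 * All of the expand_* hunks above share the RFC 5869 HKDF-Expand shape:
 * T(1) = HMAC(PRK, info || 0x01), T(i) = HMAC(PRK, T(i-1) || info || i),
 * concatenated and truncated to `len` bytes of OKM.  Illustrative sketch
 * only — `hmac_fn`, `tag_len` and `hkdf_expand` are assumptions for this
 * note, not the EverCrypt signatures.
 */
#include <stdint.h>
#include <string.h>
#include <stdlib.h>

typedef void (*hmac_fn)(uint8_t *tag, const uint8_t *key, uint32_t key_len,
                        const uint8_t *msg, uint32_t msg_len);

static void hkdf_expand(hmac_fn hmac, uint32_t tag_len,
                        uint8_t *okm, const uint8_t *prk, uint32_t prk_len,
                        const uint8_t *info, uint32_t info_len, uint32_t len)
{
  /* scratch layout mirrors the generated code: T(i-1) || info || counter */
  uint32_t text_len = tag_len + info_len + 1u;
  uint8_t *text = (uint8_t *)calloc(text_len, 1u);
  if (text == NULL) { return; }
  memcpy(text + tag_len, info, info_len);
  uint32_t n = len / tag_len;
  for (uint32_t i = 0u; i <= n; i++)
  {
    uint32_t produced = i * tag_len;
    if (produced >= len) { break; }                /* nothing left to emit  */
    text[tag_len + info_len] = (uint8_t)(i + 1u);  /* one-byte block counter */
    if (i == 0u)
    {
      hmac(text, prk, prk_len, text + tag_len, info_len + 1u);   /* T(1)    */
    }
    else
    {
      hmac(text, prk, prk_len, text, text_len);                  /* T(i+1)  */
    }
    uint32_t chunk = (len - produced < tag_len) ? (len - produced) : tag_len;
    memcpy(okm + produced, text, chunk);           /* tag doubles as T(i)   */
  }
  free(text);
}
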
diff --git a/src/EverCrypt_HMAC.c b/src/EverCrypt_HMAC.c
index 91440e61..90bcaaac 100644
--- a/src/EverCrypt_HMAC.c
+++ b/src/EverCrypt_HMAC.c
@@ -28,7 +28,9 @@
 #include "internal/Hacl_Krmllib.h"
 #include "internal/Hacl_Hash_SHA2.h"
 #include "internal/Hacl_Hash_SHA1.h"
-#include "internal/Hacl_Hash_Blake2.h"
+#include "internal/Hacl_Hash_Blake2s.h"
+#include "internal/Hacl_Hash_Blake2b.h"
+#include "internal/Hacl_HMAC.h"
 #include "internal/EverCrypt_Hash.h"
 
 bool EverCrypt_HMAC_is_supported_alg(Spec_Hash_Definitions_hash_alg uu___)
@@ -67,7 +69,7 @@ bool EverCrypt_HMAC_is_supported_alg(Spec_Hash_Definitions_hash_alg uu___)
 }
 
 void
-(*EverCrypt_HMAC_hash_256)(uint8_t *x0, uint32_t x1, uint8_t *x2) =
+(*EverCrypt_HMAC_hash_256)(uint8_t *x0, uint8_t *x1, uint32_t x2) =
   EverCrypt_Hash_Incremental_hash_256;
 
 void
@@ -79,68 +81,63 @@ EverCrypt_HMAC_compute_sha1(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)20U;
+    ite = 20U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Hash_SHA1_legacy_hash(key, key_len, nkey);
+    Hacl_Hash_SHA1_hash_oneshot(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
-  uint32_t
-  s[5U] =
-    {
-      (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-      (uint32_t)0xc3d2e1f0U
-    };
+  uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)0U, ipad, (uint32_t)64U);
+    Hacl_Hash_SHA1_update_last(s, 0ULL, ipad, 64U);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -152,25 +149,21 @@ EverCrypt_HMAC_compute_sha1(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_Hash_SHA1_legacy_update_multi(s, ipad, (uint32_t)1U);
-    Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-    Hacl_Hash_SHA1_legacy_update_last(s,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-      rem,
-      rem_len);
+    Hacl_Hash_SHA1_update_multi(s, ipad, 1U);
+    Hacl_Hash_SHA1_update_multi(s, full_blocks, n_blocks);
+    Hacl_Hash_SHA1_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
   }
-  Hacl_Hash_Core_SHA1_legacy_finish(s, dst1);
+  Hacl_Hash_SHA1_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_Hash_Core_SHA1_legacy_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)20U / block_len;
-  uint32_t rem0 = (uint32_t)20U % block_len;
+  Hacl_Hash_SHA1_init(s);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 20U / block_len;
+  uint32_t rem0 = 20U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)20U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 20U - n_blocks_ * block_len });
   }
   else
   {
@@ -181,13 +174,10 @@ EverCrypt_HMAC_compute_sha1(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_Hash_SHA1_legacy_update_multi(s, opad, (uint32_t)1U);
-  Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-  Hacl_Hash_SHA1_legacy_update_last(s,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-    rem,
-    rem_len);
-  Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
+  Hacl_Hash_SHA1_update_multi(s, opad, 1U);
+  Hacl_Hash_SHA1_update_multi(s, full_blocks, n_blocks);
+  Hacl_Hash_SHA1_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
+  Hacl_Hash_SHA1_finish(s, dst);
 }
 
 void
@@ -199,74 +189,71 @@ EverCrypt_HMAC_compute_sha2_256(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    EverCrypt_HMAC_hash_256(key, key_len, nkey);
+    EverCrypt_HMAC_hash_256(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
-    uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
+    uint32_t x = Hacl_Hash_SHA2_h256[i];
     os[i] = x;);
   uint32_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)0U + (uint64_t)(uint32_t)64U,
-      (uint32_t)64U,
-      ipad,
-      s);
+    Hacl_Hash_SHA2_sha256_update_last(0ULL + (uint64_t)64U, 64U, ipad, s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -278,27 +265,24 @@ EverCrypt_HMAC_compute_sha2_256(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    EverCrypt_Hash_update_multi_256(s, ipad, (uint32_t)1U);
+    EverCrypt_Hash_update_multi_256(s, ipad, 1U);
     EverCrypt_Hash_update_multi_256(s, full_blocks, n_blocks);
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
-      + (uint64_t)full_blocks_len
-      + (uint64_t)rem_len,
+    Hacl_Hash_SHA2_sha256_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len,
       rem_len,
       rem,
       s);
   }
-  Hacl_SHA2_Scalar32_sha256_finish(s, dst1);
+  Hacl_Hash_SHA2_sha256_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_SHA2_Scalar32_sha256_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Hash_SHA2_sha256_init(s);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -309,15 +293,13 @@ EverCrypt_HMAC_compute_sha2_256(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  EverCrypt_Hash_update_multi_256(s, opad, (uint32_t)1U);
+  EverCrypt_Hash_update_multi_256(s, opad, 1U);
   EverCrypt_Hash_update_multi_256(s, full_blocks, n_blocks);
-  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
-    + (uint64_t)full_blocks_len
-    + (uint64_t)rem_len,
+  Hacl_Hash_SHA2_sha256_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len,
     rem_len,
     rem,
     s);
-  Hacl_SHA2_Scalar32_sha256_finish(s, dst);
+  Hacl_Hash_SHA2_sha256_finish(s, dst);
 }
 
 void
@@ -329,75 +311,75 @@ EverCrypt_HMAC_compute_sha2_384(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)48U;
+    ite = 48U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Streaming_SHA2_hash_384(key, key_len, nkey);
+    Hacl_Hash_SHA2_hash_384(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
-    uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
+    uint64_t x = Hacl_Hash_SHA2_h384[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -409,27 +391,26 @@ EverCrypt_HMAC_compute_sha2_384(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_Hash_SHA2_sha384_update_nblocks(128U, ipad, s);
+    Hacl_Hash_SHA2_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
       rem,
       s);
   }
-  Hacl_SHA2_Scalar32_sha384_finish(s, dst1);
+  Hacl_Hash_SHA2_sha384_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_SHA2_Scalar32_sha384_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)48U / block_len;
-  uint32_t rem0 = (uint32_t)48U % block_len;
+  Hacl_Hash_SHA2_sha384_init(s);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 48U / block_len;
+  uint32_t rem0 = 48U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)48U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 48U - n_blocks_ * block_len });
   }
   else
   {
@@ -440,15 +421,15 @@ EverCrypt_HMAC_compute_sha2_384(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_Hash_SHA2_sha384_update_nblocks(128U, opad, s);
+  Hacl_Hash_SHA2_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
     rem,
     s);
-  Hacl_SHA2_Scalar32_sha384_finish(s, dst);
+  Hacl_Hash_SHA2_sha384_finish(s, dst);
 }
 
 void
@@ -460,75 +441,75 @@ EverCrypt_HMAC_compute_sha2_512(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Streaming_SHA2_hash_512(key, key_len, nkey);
+    Hacl_Hash_SHA2_hash_512(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
-    uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
+    uint64_t x = Hacl_Hash_SHA2_h512[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -540,27 +521,26 @@ EverCrypt_HMAC_compute_sha2_512(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_Hash_SHA2_sha512_update_nblocks(128U, ipad, s);
+    Hacl_Hash_SHA2_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
       rem,
       s);
   }
-  Hacl_SHA2_Scalar32_sha512_finish(s, dst1);
+  Hacl_Hash_SHA2_sha512_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_SHA2_Scalar32_sha512_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Hash_SHA2_sha512_init(s);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -571,15 +551,15 @@ EverCrypt_HMAC_compute_sha2_512(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_Hash_SHA2_sha512_update_nblocks(128U, opad, s);
+  Hacl_Hash_SHA2_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
     rem,
     s);
-  Hacl_SHA2_Scalar32_sha512_finish(s, dst);
+  Hacl_Hash_SHA2_sha512_finish(s, dst);
 }
 
 void
@@ -591,66 +571,66 @@ EverCrypt_HMAC_compute_blake2s(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2s_32_blake2s((uint32_t)32U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Hash_Blake2s_hash_with_key(nkey, 32U, key, key_len, NULL, 0U);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t s[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_init(s, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Hash_Blake2s_init(s, 0U, 32U);
   uint32_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last((uint32_t)64U, wv, s0, (uint64_t)0U, (uint32_t)64U, ipad);
+    Hacl_Hash_Blake2s_update_last(64U, wv, s0, 0ULL, 64U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -663,34 +643,33 @@ EverCrypt_HMAC_compute_blake2s(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, ipad, (uint32_t)1U);
+    Hacl_Hash_Blake2s_update_multi(64U, wv, s0, 0ULL, ipad, 1U);
     uint32_t wv0[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+    Hacl_Hash_Blake2s_update_multi(n_blocks * 64U,
       wv0,
       s0,
       (uint64_t)block_len,
       full_blocks,
       n_blocks);
     uint32_t wv1[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last(rem_len,
+    Hacl_Hash_Blake2s_update_last(rem_len,
       wv1,
       s0,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+      (uint64_t)64U + (uint64_t)full_blocks_len,
       rem_len,
       rem);
   }
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst1, s0);
+  Hacl_Hash_Blake2s_finish(32U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2s_32_blake2s_init(s0, (uint32_t)0U, (uint32_t)32U);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Hash_Blake2s_init(s0, 0U, 32U);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -702,22 +681,22 @@ EverCrypt_HMAC_compute_blake2s(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint32_t wv[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, opad, (uint32_t)1U);
+  Hacl_Hash_Blake2s_update_multi(64U, wv, s0, 0ULL, opad, 1U);
   uint32_t wv0[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+  Hacl_Hash_Blake2s_update_multi(n_blocks * 64U,
     wv0,
     s0,
     (uint64_t)block_len,
     full_blocks,
     n_blocks);
   uint32_t wv1[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_last(rem_len,
+  Hacl_Hash_Blake2s_update_last(rem_len,
     wv1,
     s0,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+    (uint64_t)64U + (uint64_t)full_blocks_len,
     rem_len,
     rem);
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, s0);
+  Hacl_Hash_Blake2s_finish(32U, dst, s0);
 }
 
 void
@@ -729,71 +708,66 @@ EverCrypt_HMAC_compute_blake2b(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2b_32_blake2b((uint32_t)64U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Hash_Blake2b_hash_with_key(nkey, 64U, key, key_len, NULL, 0U);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t s[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_init(s, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Hash_Blake2b_init(s, 0U, 64U);
   uint64_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last((uint32_t)128U,
-      wv,
-      s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      (uint32_t)128U,
-      ipad);
+    Hacl_Hash_Blake2b_update_last(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), 128U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -806,40 +780,34 @@ EverCrypt_HMAC_compute_blake2b(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
-      wv,
-      s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      ipad,
-      (uint32_t)1U);
+    Hacl_Hash_Blake2b_update_multi(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), ipad, 1U);
     uint64_t wv0[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+    Hacl_Hash_Blake2b_update_multi(n_blocks * 128U,
       wv0,
       s0,
       FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
       full_blocks,
       n_blocks);
     uint64_t wv1[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last(rem_len,
+    Hacl_Hash_Blake2b_update_last(rem_len,
       wv1,
       s0,
-      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       rem_len,
       rem);
   }
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst1, s0);
+  Hacl_Hash_Blake2b_finish(64U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2b_32_blake2b_init(s0, (uint32_t)0U, (uint32_t)64U);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Hash_Blake2b_init(s0, 0U, 64U);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -851,28 +819,23 @@ EverCrypt_HMAC_compute_blake2b(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint64_t wv[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
-    wv,
-    s0,
-    FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-    opad,
-    (uint32_t)1U);
+  Hacl_Hash_Blake2b_update_multi(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), opad, 1U);
   uint64_t wv0[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+  Hacl_Hash_Blake2b_update_multi(n_blocks * 128U,
     wv0,
     s0,
     FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
     full_blocks,
     n_blocks);
   uint64_t wv1[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_last(rem_len,
+  Hacl_Hash_Blake2b_update_last(rem_len,
     wv1,
     s0,
-    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
       FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
     rem_len,
     rem);
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, s0);
+  Hacl_Hash_Blake2b_finish(64U, dst, s0);
 }
 
 void
diff --git a/src/EverCrypt_Hash.c b/src/EverCrypt_Hash.c
index b88df9e2..1adf2f1d 100644
--- a/src/EverCrypt_Hash.c
+++ b/src/EverCrypt_Hash.c
@@ -31,6 +31,10 @@
 #include "internal/Hacl_Hash_SHA2.h"
 #include "internal/Hacl_Hash_SHA1.h"
 #include "internal/Hacl_Hash_MD5.h"
+#include "internal/Hacl_Hash_Blake2s_Simd128.h"
+#include "internal/Hacl_Hash_Blake2s.h"
+#include "internal/Hacl_Hash_Blake2b_Simd256.h"
+#include "internal/Hacl_Hash_Blake2b.h"
 #include "config.h"
 
 #define MD5_s 0
@@ -146,61 +150,61 @@ static EverCrypt_Hash_state_s *create_in(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = MD5_s, { .case_MD5_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA1_s, { .case_SHA1_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_224_s, { .case_SHA2_224_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_256_s, { .case_SHA2_256_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_384_s, { .case_SHA2_384_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_512_s, { .case_SHA2_512_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_224_s, { .case_SHA3_224_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_256_s, { .case_SHA3_256_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_384_s, { .case_SHA3_384_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_512_s, { .case_SHA3_512_s = buf } });
         break;
       }
@@ -214,17 +218,17 @@ static EverCrypt_Hash_state_s *create_in(Spec_Hash_Definitions_hash_alg a)
             (
               (EverCrypt_Hash_state_s){
                 .tag = Blake2S_128_s,
-                { .case_Blake2S_128_s = Hacl_Blake2s_128_blake2s_malloc() }
+                { .case_Blake2S_128_s = Hacl_Hash_Blake2s_Simd128_malloc_with_key() }
               }
             );
         }
         else
         {
-          uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
+          uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
           s = ((EverCrypt_Hash_state_s){ .tag = Blake2S_s, { .case_Blake2S_s = buf } });
         }
         #else
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = Blake2S_s, { .case_Blake2S_s = buf } });
         #endif
         break;
@@ -239,17 +243,17 @@ static EverCrypt_Hash_state_s *create_in(Spec_Hash_Definitions_hash_alg a)
             (
               (EverCrypt_Hash_state_s){
                 .tag = Blake2B_256_s,
-                { .case_Blake2B_256_s = Hacl_Blake2b_256_blake2b_malloc() }
+                { .case_Blake2B_256_s = Hacl_Hash_Blake2b_Simd256_malloc_with_key() }
               }
             );
         }
         else
         {
-          uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
+          uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
           s = ((EverCrypt_Hash_state_s){ .tag = Blake2B_s, { .case_Blake2B_s = buf } });
         }
         #else
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = Blake2B_s, { .case_Blake2B_s = buf } });
         #endif
         break;
@@ -272,94 +276,94 @@ static void init(EverCrypt_Hash_state_s *s)
   if (scrut.tag == MD5_s)
   {
     uint32_t *p1 = scrut.case_MD5_s;
-    Hacl_Hash_Core_MD5_legacy_init(p1);
+    Hacl_Hash_MD5_init(p1);
     return;
   }
   if (scrut.tag == SHA1_s)
   {
     uint32_t *p1 = scrut.case_SHA1_s;
-    Hacl_Hash_Core_SHA1_legacy_init(p1);
+    Hacl_Hash_SHA1_init(p1);
     return;
   }
   if (scrut.tag == SHA2_224_s)
   {
     uint32_t *p1 = scrut.case_SHA2_224_s;
-    Hacl_SHA2_Scalar32_sha224_init(p1);
+    Hacl_Hash_SHA2_sha224_init(p1);
     return;
   }
   if (scrut.tag == SHA2_256_s)
   {
     uint32_t *p1 = scrut.case_SHA2_256_s;
-    Hacl_SHA2_Scalar32_sha256_init(p1);
+    Hacl_Hash_SHA2_sha256_init(p1);
     return;
   }
   if (scrut.tag == SHA2_384_s)
   {
     uint64_t *p1 = scrut.case_SHA2_384_s;
-    Hacl_SHA2_Scalar32_sha384_init(p1);
+    Hacl_Hash_SHA2_sha384_init(p1);
     return;
   }
   if (scrut.tag == SHA2_512_s)
   {
     uint64_t *p1 = scrut.case_SHA2_512_s;
-    Hacl_SHA2_Scalar32_sha512_init(p1);
+    Hacl_Hash_SHA2_sha512_init(p1);
     return;
   }
   if (scrut.tag == SHA3_224_s)
   {
     uint64_t *p1 = scrut.case_SHA3_224_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == SHA3_256_s)
   {
     uint64_t *p1 = scrut.case_SHA3_256_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == SHA3_384_s)
   {
     uint64_t *p1 = scrut.case_SHA3_384_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == SHA3_512_s)
   {
     uint64_t *p1 = scrut.case_SHA3_512_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == Blake2S_s)
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
-    Hacl_Blake2s_32_blake2s_init(p1, (uint32_t)0U, (uint32_t)32U);
+    Hacl_Hash_Blake2s_init(p1, 0U, 32U);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
   {
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
-    Hacl_Blake2s_128_blake2s_init(p1, (uint32_t)0U, (uint32_t)32U);
+    Hacl_Hash_Blake2s_Simd128_init(p1, 0U, 32U);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
   if (scrut.tag == Blake2B_s)
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
-    Hacl_Blake2b_32_blake2b_init(p1, (uint32_t)0U, (uint32_t)64U);
+    Hacl_Hash_Blake2b_init(p1, 0U, 64U);
     return;
   }
   if (scrut.tag == Blake2B_256_s)
   {
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
-    Hacl_Blake2b_256_blake2b_init(p1, (uint32_t)0U, (uint32_t)64U);
+    Hacl_Hash_Blake2b_Simd256_init(p1, 0U, 64U);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -373,22 +377,16 @@ static void init(EverCrypt_Hash_state_s *s)
 static uint32_t
 k224_256[64U] =
   {
-    (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U,
-    (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U,
-    (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U,
-    (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U,
-    (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU,
-    (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU,
-    (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U,
-    (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U,
-    (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U,
-    (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U,
-    (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U,
-    (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U,
-    (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U,
-    (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U,
-    (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U,
-    (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U
+    0x428a2f98U, 0x71374491U, 0xb5c0fbcfU, 0xe9b5dba5U, 0x3956c25bU, 0x59f111f1U, 0x923f82a4U,
+    0xab1c5ed5U, 0xd807aa98U, 0x12835b01U, 0x243185beU, 0x550c7dc3U, 0x72be5d74U, 0x80deb1feU,
+    0x9bdc06a7U, 0xc19bf174U, 0xe49b69c1U, 0xefbe4786U, 0x0fc19dc6U, 0x240ca1ccU, 0x2de92c6fU,
+    0x4a7484aaU, 0x5cb0a9dcU, 0x76f988daU, 0x983e5152U, 0xa831c66dU, 0xb00327c8U, 0xbf597fc7U,
+    0xc6e00bf3U, 0xd5a79147U, 0x06ca6351U, 0x14292967U, 0x27b70a85U, 0x2e1b2138U, 0x4d2c6dfcU,
+    0x53380d13U, 0x650a7354U, 0x766a0abbU, 0x81c2c92eU, 0x92722c85U, 0xa2bfe8a1U, 0xa81a664bU,
+    0xc24b8b70U, 0xc76c51a3U, 0xd192e819U, 0xd6990624U, 0xf40e3585U, 0x106aa070U, 0x19a4c116U,
+    0x1e376c08U, 0x2748774cU, 0x34b0bcb5U, 0x391c0cb3U, 0x4ed8aa4aU, 0x5b9cca4fU, 0x682e6ff3U,
+    0x748f82eeU, 0x78a5636fU, 0x84c87814U, 0x8cc70208U, 0x90befffaU, 0xa4506cebU, 0xbef9a3f7U,
+    0xc67178f2U
   };
 
 void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n)
@@ -399,13 +397,13 @@ void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n)
   if (has_shaext && has_sse)
   {
     uint64_t n1 = (uint64_t)n;
-    KRML_HOST_IGNORE(sha256_update(s, blocks, n1, k224_256));
+    sha256_update(s, blocks, n1, k224_256);
     return;
   }
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(n * (uint32_t)64U, blocks, s);
+  Hacl_Hash_SHA2_sha256_update_nblocks(n * 64U, blocks, s);
   #else
   KRML_HOST_IGNORE(k224_256);
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(n * (uint32_t)64U, blocks, s);
+  Hacl_Hash_SHA2_sha256_update_nblocks(n * 64U, blocks, s);
   #endif
 }
 
@@ -416,100 +414,100 @@ update_multi(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *blocks, uint3
   if (scrut.tag == MD5_s)
   {
     uint32_t *p1 = scrut.case_MD5_s;
-    uint32_t n = len / (uint32_t)64U;
-    Hacl_Hash_MD5_legacy_update_multi(p1, blocks, n);
+    uint32_t n = len / 64U;
+    Hacl_Hash_MD5_update_multi(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA1_s)
   {
     uint32_t *p1 = scrut.case_SHA1_s;
-    uint32_t n = len / (uint32_t)64U;
-    Hacl_Hash_SHA1_legacy_update_multi(p1, blocks, n);
+    uint32_t n = len / 64U;
+    Hacl_Hash_SHA1_update_multi(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA2_224_s)
   {
     uint32_t *p1 = scrut.case_SHA2_224_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     EverCrypt_Hash_update_multi_256(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA2_256_s)
   {
     uint32_t *p1 = scrut.case_SHA2_256_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     EverCrypt_Hash_update_multi_256(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA2_384_s)
   {
     uint64_t *p1 = scrut.case_SHA2_384_s;
-    uint32_t n = len / (uint32_t)128U;
-    Hacl_SHA2_Scalar32_sha384_update_nblocks(n * (uint32_t)128U, blocks, p1);
+    uint32_t n = len / 128U;
+    Hacl_Hash_SHA2_sha384_update_nblocks(n * 128U, blocks, p1);
     return;
   }
   if (scrut.tag == SHA2_512_s)
   {
     uint64_t *p1 = scrut.case_SHA2_512_s;
-    uint32_t n = len / (uint32_t)128U;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(n * (uint32_t)128U, blocks, p1);
+    uint32_t n = len / 128U;
+    Hacl_Hash_SHA2_sha512_update_nblocks(n * 128U, blocks, p1);
     return;
   }
   if (scrut.tag == SHA3_224_s)
   {
     uint64_t *p1 = scrut.case_SHA3_224_s;
-    uint32_t n = len / (uint32_t)144U;
+    uint32_t n = len / 144U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_224, p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA3_256_s)
   {
     uint64_t *p1 = scrut.case_SHA3_256_s;
-    uint32_t n = len / (uint32_t)136U;
+    uint32_t n = len / 136U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_256, p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA3_384_s)
   {
     uint64_t *p1 = scrut.case_SHA3_384_s;
-    uint32_t n = len / (uint32_t)104U;
+    uint32_t n = len / 104U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_384, p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA3_512_s)
   {
     uint64_t *p1 = scrut.case_SHA3_512_s;
-    uint32_t n = len / (uint32_t)72U;
+    uint32_t n = len / 72U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_512, p1, blocks, n);
     return;
   }
   if (scrut.tag == Blake2S_s)
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi(n * (uint32_t)64U, wv, p1, prevlen, blocks, n);
+    Hacl_Hash_Blake2s_update_multi(n * 64U, wv, p1, prevlen, blocks, n);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
   {
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_multi(n * (uint32_t)64U, wv, p1, prevlen, blocks, n);
+    Hacl_Hash_Blake2s_Simd128_update_multi(n * 64U, wv, p1, prevlen, blocks, n);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
   if (scrut.tag == Blake2B_s)
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
-    uint32_t n = len / (uint32_t)128U;
+    uint32_t n = len / 128U;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi(n * (uint32_t)128U,
+    Hacl_Hash_Blake2b_update_multi(n * 128U,
       wv,
       p1,
       FStar_UInt128_uint64_to_uint128(prevlen),
@@ -521,9 +519,9 @@ update_multi(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *blocks, uint3
   {
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
-    uint32_t n = len / (uint32_t)128U;
+    uint32_t n = len / 128U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_multi(n * (uint32_t)128U,
+    Hacl_Hash_Blake2b_Simd256_update_multi(n * 128U,
       wv,
       p1,
       FStar_UInt128_uint64_to_uint128(prevlen),
@@ -531,7 +529,7 @@ update_multi(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *blocks, uint3
       n);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -549,31 +547,31 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
   if (scrut.tag == MD5_s)
   {
     uint32_t *p1 = scrut.case_MD5_s;
-    Hacl_Hash_MD5_legacy_update_last(p1, prev_len, last, last_len);
+    Hacl_Hash_MD5_update_last(p1, prev_len, last, last_len);
     return;
   }
   if (scrut.tag == SHA1_s)
   {
     uint32_t *p1 = scrut.case_SHA1_s;
-    Hacl_Hash_SHA1_legacy_update_last(p1, prev_len, last, last_len);
+    Hacl_Hash_SHA1_update_last(p1, prev_len, last, last_len);
     return;
   }
   if (scrut.tag == SHA2_224_s)
   {
     uint32_t *p1 = scrut.case_SHA2_224_s;
-    Hacl_SHA2_Scalar32_sha224_update_last(prev_len + (uint64_t)last_len, last_len, last, p1);
+    Hacl_Hash_SHA2_sha224_update_last(prev_len + (uint64_t)last_len, last_len, last, p1);
     return;
   }
   if (scrut.tag == SHA2_256_s)
   {
     uint32_t *p1 = scrut.case_SHA2_256_s;
-    Hacl_SHA2_Scalar32_sha256_update_last(prev_len + (uint64_t)last_len, last_len, last, p1);
+    Hacl_Hash_SHA2_sha256_update_last(prev_len + (uint64_t)last_len, last_len, last, p1);
     return;
   }
   if (scrut.tag == SHA2_384_s)
   {
     uint64_t *p1 = scrut.case_SHA2_384_s;
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len),
+    Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len),
         FStar_UInt128_uint64_to_uint128((uint64_t)last_len)),
       last_len,
       last,
@@ -583,7 +581,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
   if (scrut.tag == SHA2_512_s)
   {
     uint64_t *p1 = scrut.case_SHA2_512_s;
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len),
+    Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len),
         FStar_UInt128_uint64_to_uint128((uint64_t)last_len)),
       last_len,
       last,
@@ -618,7 +616,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last(last_len, wv, p1, prev_len, last_len, last);
+    Hacl_Hash_Blake2s_update_last(last_len, wv, p1, prev_len, last_len, last);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
@@ -626,10 +624,10 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_last(last_len, wv, p1, prev_len, last_len, last);
+    Hacl_Hash_Blake2s_Simd128_update_last(last_len, wv, p1, prev_len, last_len, last);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -637,7 +635,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last(last_len,
+    Hacl_Hash_Blake2b_update_last(last_len,
       wv,
       p1,
       FStar_UInt128_uint64_to_uint128(prev_len),
@@ -650,7 +648,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_last(last_len,
+    Hacl_Hash_Blake2b_Simd256_update_last(last_len,
       wv,
       p1,
       FStar_UInt128_uint64_to_uint128(prev_len),
@@ -658,7 +656,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
       last);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -675,94 +673,94 @@ static void finish(EverCrypt_Hash_state_s *s, uint8_t *dst)
   if (scrut.tag == MD5_s)
   {
     uint32_t *p1 = scrut.case_MD5_s;
-    Hacl_Hash_Core_MD5_legacy_finish(p1, dst);
+    Hacl_Hash_MD5_finish(p1, dst);
     return;
   }
   if (scrut.tag == SHA1_s)
   {
     uint32_t *p1 = scrut.case_SHA1_s;
-    Hacl_Hash_Core_SHA1_legacy_finish(p1, dst);
+    Hacl_Hash_SHA1_finish(p1, dst);
     return;
   }
   if (scrut.tag == SHA2_224_s)
   {
     uint32_t *p1 = scrut.case_SHA2_224_s;
-    Hacl_SHA2_Scalar32_sha224_finish(p1, dst);
+    Hacl_Hash_SHA2_sha224_finish(p1, dst);
     return;
   }
   if (scrut.tag == SHA2_256_s)
   {
     uint32_t *p1 = scrut.case_SHA2_256_s;
-    Hacl_SHA2_Scalar32_sha256_finish(p1, dst);
+    Hacl_Hash_SHA2_sha256_finish(p1, dst);
     return;
   }
   if (scrut.tag == SHA2_384_s)
   {
     uint64_t *p1 = scrut.case_SHA2_384_s;
-    Hacl_SHA2_Scalar32_sha384_finish(p1, dst);
+    Hacl_Hash_SHA2_sha384_finish(p1, dst);
     return;
   }
   if (scrut.tag == SHA2_512_s)
   {
     uint64_t *p1 = scrut.case_SHA2_512_s;
-    Hacl_SHA2_Scalar32_sha512_finish(p1, dst);
+    Hacl_Hash_SHA2_sha512_finish(p1, dst);
     return;
   }
   if (scrut.tag == SHA3_224_s)
   {
     uint64_t *p1 = scrut.case_SHA3_224_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)144U, (uint32_t)28U, dst);
+    Hacl_Hash_SHA3_squeeze0(p1, 144U, 28U, dst);
     return;
   }
   if (scrut.tag == SHA3_256_s)
   {
     uint64_t *p1 = scrut.case_SHA3_256_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)136U, (uint32_t)32U, dst);
+    Hacl_Hash_SHA3_squeeze0(p1, 136U, 32U, dst);
     return;
   }
   if (scrut.tag == SHA3_384_s)
   {
     uint64_t *p1 = scrut.case_SHA3_384_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)104U, (uint32_t)48U, dst);
+    Hacl_Hash_SHA3_squeeze0(p1, 104U, 48U, dst);
     return;
   }
   if (scrut.tag == SHA3_512_s)
   {
     uint64_t *p1 = scrut.case_SHA3_512_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)72U, (uint32_t)64U, dst);
+    Hacl_Hash_SHA3_squeeze0(p1, 72U, 64U, dst);
     return;
   }
   if (scrut.tag == Blake2S_s)
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
-    Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, p1);
+    Hacl_Hash_Blake2s_finish(32U, dst, p1);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
   {
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
-    Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst, p1);
+    Hacl_Hash_Blake2s_Simd128_finish(32U, dst, p1);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
   if (scrut.tag == Blake2B_s)
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
-    Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, p1);
+    Hacl_Hash_Blake2b_finish(64U, dst, p1);
     return;
   }
   if (scrut.tag == Blake2B_256_s)
   {
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
-    Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst, p1);
+    Hacl_Hash_Blake2b_Simd256_finish(64U, dst, p1);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -873,7 +871,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)4U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 4U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA1_s)
@@ -889,7 +887,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)5U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 5U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA2_224_s)
@@ -905,7 +903,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA2_256_s)
@@ -921,7 +919,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA2_384_s)
@@ -937,7 +935,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA2_512_s)
@@ -953,7 +951,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_224_s)
@@ -969,7 +967,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_256_s)
@@ -985,7 +983,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_384_s)
@@ -1001,7 +999,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_512_s)
@@ -1017,7 +1015,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == Blake2S_s)
@@ -1027,17 +1025,17 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2S_s)
     {
       uint32_t *p_dst = scrut.case_Blake2S_s;
-      memcpy(p_dst, p_src, (uint32_t)16U * sizeof (uint32_t));
+      memcpy(p_dst, p_src, 16U * sizeof (uint32_t));
       return;
     }
     if (scrut.tag == Blake2S_128_s)
     {
       Lib_IntVector_Intrinsics_vec128 *p_dst = scrut.case_Blake2S_128_s;
       #if HACL_CAN_COMPILE_VEC128
-      Hacl_Blake2s_128_load_state128s_from_state32(p_dst, p_src);
+      Hacl_Hash_Blake2s_Simd128_load_state128s_from_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1054,17 +1052,17 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2B_s)
     {
       uint64_t *p_dst = scrut.case_Blake2B_s;
-      memcpy(p_dst, p_src, (uint32_t)16U * sizeof (uint64_t));
+      memcpy(p_dst, p_src, 16U * sizeof (uint64_t));
       return;
     }
     if (scrut.tag == Blake2B_256_s)
     {
       Lib_IntVector_Intrinsics_vec256 *p_dst = scrut.case_Blake2B_256_s;
       #if HACL_CAN_COMPILE_VEC256
-      Hacl_Blake2b_256_load_state256b_from_state32(p_dst, p_src);
+      Hacl_Hash_Blake2b_Simd256_load_state256b_from_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1081,17 +1079,17 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2S_128_s)
     {
       Lib_IntVector_Intrinsics_vec128 *p_dst = scrut.case_Blake2S_128_s;
-      memcpy(p_dst, p_src, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+      memcpy(p_dst, p_src, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
       return;
     }
     if (scrut.tag == Blake2S_s)
     {
       uint32_t *p_dst = scrut.case_Blake2S_s;
       #if HACL_CAN_COMPILE_VEC128
-      Hacl_Blake2s_128_store_state128s_to_state32(p_dst, p_src);
+      Hacl_Hash_Blake2s_Simd128_store_state128s_to_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1108,17 +1106,17 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2B_256_s)
     {
       Lib_IntVector_Intrinsics_vec256 *p_dst = scrut.case_Blake2B_256_s;
-      memcpy(p_dst, p_src, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+      memcpy(p_dst, p_src, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
       return;
     }
     if (scrut.tag == Blake2B_s)
     {
       uint64_t *p_dst = scrut.case_Blake2B_s;
       #if HACL_CAN_COMPILE_VEC256
-      Hacl_Blake2b_256_store_state256b_to_state32(p_dst, p_src);
+      Hacl_Hash_Blake2b_Simd256_store_state256b_to_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1201,59 +1199,59 @@ static uint32_t block_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)144U;
+        return 144U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)104U;
+        return 104U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)72U;
+        return 72U;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        return (uint32_t)168U;
+        return 168U;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     default:
       {
@@ -1269,18 +1267,18 @@ choice of algorithm (see Hacl_Spec.h). This API will automatically pick the most
 efficient implementation, provided you have called EverCrypt_AutoConfig2_init()
 before. The state is to be freed by calling `free`.
 */
-EverCrypt_Hash_Incremental_hash_state
-*EverCrypt_Hash_Incremental_create_in(Spec_Hash_Definitions_hash_alg a)
+EverCrypt_Hash_Incremental_state_t
+*EverCrypt_Hash_Incremental_malloc(Spec_Hash_Definitions_hash_alg a)
 {
   KRML_CHECK_SIZE(sizeof (uint8_t), block_len(a));
   uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(block_len(a), sizeof (uint8_t));
   EverCrypt_Hash_state_s *block_state = create_in(a);
-  EverCrypt_Hash_Incremental_hash_state
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  EverCrypt_Hash_Incremental_hash_state
+  EverCrypt_Hash_Incremental_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  EverCrypt_Hash_Incremental_state_t
   *p =
-    (EverCrypt_Hash_Incremental_hash_state *)KRML_HOST_MALLOC(sizeof (
-        EverCrypt_Hash_Incremental_hash_state
+    (EverCrypt_Hash_Incremental_state_t *)KRML_HOST_MALLOC(sizeof (
+        EverCrypt_Hash_Incremental_state_t
       ));
   p[0U] = s;
   init(block_state);
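
For orientation, a minimal caller sketch for the renamed allocation path documented above. It is not part of the generated patch: `EverCrypt_Hash_Incremental_digest` and `EverCrypt_Hash_Incremental_free` are assumed here to follow the same `Incremental_*` renaming as the malloc, reset and update entry points changed in this file.

#include "EverCrypt_AutoConfig2.h"
#include "EverCrypt_Hash.h"

/* Sketch: hash the three bytes "abc" with SHA2-256 through the incremental API. */
static void example_sha256_abc(uint8_t digest[32U])
{
  EverCrypt_AutoConfig2_init();                      /* lets malloc pick the most efficient implementation */
  EverCrypt_Hash_Incremental_state_t
  *st = EverCrypt_Hash_Incremental_malloc(Spec_Hash_Definitions_SHA2_256);
  (void)EverCrypt_Hash_Incremental_update(st, (uint8_t *)"abc", 3U);
  EverCrypt_Hash_Incremental_digest(st, digest);     /* assumed name, see note above */
  EverCrypt_Hash_Incremental_free(st);               /* assumed name, see note above */
}
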
@@ -1290,17 +1288,17 @@ EverCrypt_Hash_Incremental_hash_state
 /**
 Reset an existing state to the initial hash state with empty data.
 */
-void EverCrypt_Hash_Incremental_init(EverCrypt_Hash_Incremental_hash_state *s)
+void EverCrypt_Hash_Incremental_reset(EverCrypt_Hash_Incremental_state_t *state)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *s;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   uint8_t *buf = scrut.buf;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   Spec_Hash_Definitions_hash_alg i = alg_of_state(block_state);
-  KRML_HOST_IGNORE(i);
+  KRML_MAYBE_UNUSED_VAR(i);
   init(block_state);
-  EverCrypt_Hash_Incremental_hash_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  EverCrypt_Hash_Incremental_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
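
The reset entry point above exists so one allocation can serve several messages. A short sketch of that pattern, using the same headers as the sketch after the malloc hunk and the same assumed `digest`/`free` names; the message and output buffers are placeholders passed in by the caller.

/* Sketch: reuse one state for two SHA2-256 digests. */
static void example_two_digests(
  uint8_t *msg1, uint32_t msg1_len, uint8_t out1[32U],
  uint8_t *msg2, uint32_t msg2_len, uint8_t out2[32U]
)
{
  EverCrypt_Hash_Incremental_state_t
  *st = EverCrypt_Hash_Incremental_malloc(Spec_Hash_Definitions_SHA2_256);
  (void)EverCrypt_Hash_Incremental_update(st, msg1, msg1_len);
  EverCrypt_Hash_Incremental_digest(st, out1);       /* assumed name */
  EverCrypt_Hash_Incremental_reset(st);              /* back to the empty-data state, same algorithm */
  (void)EverCrypt_Hash_Incremental_update(st, msg2, msg2_len);
  EverCrypt_Hash_Incremental_digest(st, out2);       /* assumed name */
  EverCrypt_Hash_Incremental_free(st);               /* assumed name */
}
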
 
 /**
@@ -1312,86 +1310,86 @@ algorithm. Both limits are unlikely to be attained in practice.
 */
 EverCrypt_Error_error_code
 EverCrypt_Hash_Incremental_update(
-  EverCrypt_Hash_Incremental_hash_state *s,
-  uint8_t *data,
-  uint32_t len
+  EverCrypt_Hash_Incremental_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
 )
 {
-  EverCrypt_Hash_Incremental_hash_state s1 = *s;
-  EverCrypt_Hash_state_s *block_state = s1.block_state;
-  uint64_t total_len = s1.total_len;
+  EverCrypt_Hash_Incremental_state_t s = *state;
+  EverCrypt_Hash_state_s *block_state = s.block_state;
+  uint64_t total_len = s.total_len;
   Spec_Hash_Definitions_hash_alg i1 = alg_of_state(block_state);
   uint64_t sw;
   switch (i1)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     default:
@@ -1401,14 +1399,14 @@ EverCrypt_Hash_Incremental_update(
       }
   }
   Hacl_Streaming_Types_error_code ite;
-  if ((uint64_t)len > sw - total_len)
+  if ((uint64_t)chunk_len > sw - total_len)
   {
     ite = Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   else
   {
     uint32_t sz;
-    if (total_len % (uint64_t)block_len(i1) == (uint64_t)0U && total_len > (uint64_t)0U)
+    if (total_len % (uint64_t)block_len(i1) == 0ULL && total_len > 0ULL)
     {
       sz = block_len(i1);
     }
@@ -1416,14 +1414,14 @@ EverCrypt_Hash_Incremental_update(
     {
       sz = (uint32_t)(total_len % (uint64_t)block_len(i1));
     }
-    if (len <= block_len(i1) - sz)
+    if (chunk_len <= block_len(i1) - sz)
     {
-      EverCrypt_Hash_Incremental_hash_state s2 = *s;
-      EverCrypt_Hash_state_s *block_state1 = s2.block_state;
-      uint8_t *buf = s2.buf;
-      uint64_t total_len1 = s2.total_len;
+      EverCrypt_Hash_Incremental_state_t s1 = *state;
+      EverCrypt_Hash_state_s *block_state1 = s1.block_state;
+      uint8_t *buf = s1.buf;
+      uint64_t total_len1 = s1.total_len;
       uint32_t sz1;
-      if (total_len1 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+      if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL)
       {
         sz1 = block_len(i1);
       }
@@ -1432,26 +1430,26 @@ EverCrypt_Hash_Incremental_update(
         sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i1));
       }
       uint8_t *buf2 = buf + sz1;
-      memcpy(buf2, data, len * sizeof (uint8_t));
-      uint64_t total_len2 = total_len1 + (uint64_t)len;
-      *s
+      memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+      uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+      *state
       =
         (
-          (EverCrypt_Hash_Incremental_hash_state){
+          (EverCrypt_Hash_Incremental_state_t){
             .block_state = block_state1,
             .buf = buf,
             .total_len = total_len2
           }
         );
     }
-    else if (sz == (uint32_t)0U)
+    else if (sz == 0U)
     {
-      EverCrypt_Hash_Incremental_hash_state s2 = *s;
-      EverCrypt_Hash_state_s *block_state1 = s2.block_state;
-      uint8_t *buf = s2.buf;
-      uint64_t total_len1 = s2.total_len;
+      EverCrypt_Hash_Incremental_state_t s1 = *state;
+      EverCrypt_Hash_state_s *block_state1 = s1.block_state;
+      uint8_t *buf = s1.buf;
+      uint64_t total_len1 = s1.total_len;
       uint32_t sz1;
-      if (total_len1 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+      if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL)
       {
         sz1 = block_len(i1);
       }
@@ -1459,49 +1457,49 @@ EverCrypt_Hash_Incremental_update(
       {
         sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i1));
       }
-      if (!(sz1 == (uint32_t)0U))
+      if (!(sz1 == 0U))
       {
         uint64_t prevlen = total_len1 - (uint64_t)sz1;
         update_multi(block_state1, prevlen, buf, block_len(i1));
       }
       uint32_t ite0;
-      if ((uint64_t)len % (uint64_t)block_len(i1) == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+      if ((uint64_t)chunk_len % (uint64_t)block_len(i1) == 0ULL && (uint64_t)chunk_len > 0ULL)
       {
         ite0 = block_len(i1);
       }
       else
       {
-        ite0 = (uint32_t)((uint64_t)len % (uint64_t)block_len(i1));
+        ite0 = (uint32_t)((uint64_t)chunk_len % (uint64_t)block_len(i1));
       }
-      uint32_t n_blocks = (len - ite0) / block_len(i1);
+      uint32_t n_blocks = (chunk_len - ite0) / block_len(i1);
       uint32_t data1_len = n_blocks * block_len(i1);
-      uint32_t data2_len = len - data1_len;
-      uint8_t *data1 = data;
-      uint8_t *data2 = data + data1_len;
+      uint32_t data2_len = chunk_len - data1_len;
+      uint8_t *data1 = chunk;
+      uint8_t *data2 = chunk + data1_len;
       update_multi(block_state1, total_len1, data1, data1_len);
       uint8_t *dst = buf;
       memcpy(dst, data2, data2_len * sizeof (uint8_t));
-      *s
+      *state
       =
         (
-          (EverCrypt_Hash_Incremental_hash_state){
+          (EverCrypt_Hash_Incremental_state_t){
             .block_state = block_state1,
             .buf = buf,
-            .total_len = total_len1 + (uint64_t)len
+            .total_len = total_len1 + (uint64_t)chunk_len
           }
         );
     }
     else
     {
       uint32_t diff = block_len(i1) - sz;
-      uint8_t *data1 = data;
-      uint8_t *data2 = data + diff;
-      EverCrypt_Hash_Incremental_hash_state s2 = *s;
-      EverCrypt_Hash_state_s *block_state10 = s2.block_state;
-      uint8_t *buf0 = s2.buf;
-      uint64_t total_len10 = s2.total_len;
+      uint8_t *chunk1 = chunk;
+      uint8_t *chunk2 = chunk + diff;
+      EverCrypt_Hash_Incremental_state_t s1 = *state;
+      EverCrypt_Hash_state_s *block_state10 = s1.block_state;
+      uint8_t *buf0 = s1.buf;
+      uint64_t total_len10 = s1.total_len;
       uint32_t sz10;
-      if (total_len10 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len10 > (uint64_t)0U)
+      if (total_len10 % (uint64_t)block_len(i1) == 0ULL && total_len10 > 0ULL)
       {
         sz10 = block_len(i1);
       }
@@ -1510,23 +1508,23 @@ EverCrypt_Hash_Incremental_update(
         sz10 = (uint32_t)(total_len10 % (uint64_t)block_len(i1));
       }
       uint8_t *buf2 = buf0 + sz10;
-      memcpy(buf2, data1, diff * sizeof (uint8_t));
+      memcpy(buf2, chunk1, diff * sizeof (uint8_t));
       uint64_t total_len2 = total_len10 + (uint64_t)diff;
-      *s
+      *state
       =
         (
-          (EverCrypt_Hash_Incremental_hash_state){
+          (EverCrypt_Hash_Incremental_state_t){
             .block_state = block_state10,
             .buf = buf0,
             .total_len = total_len2
           }
         );
-      EverCrypt_Hash_Incremental_hash_state s20 = *s;
-      EverCrypt_Hash_state_s *block_state1 = s20.block_state;
-      uint8_t *buf = s20.buf;
-      uint64_t total_len1 = s20.total_len;
+      EverCrypt_Hash_Incremental_state_t s10 = *state;
+      EverCrypt_Hash_state_s *block_state1 = s10.block_state;
+      uint8_t *buf = s10.buf;
+      uint64_t total_len1 = s10.total_len;
       uint32_t sz1;
-      if (total_len1 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+      if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL)
       {
         sz1 = block_len(i1);
       }
@@ -1534,7 +1532,7 @@ EverCrypt_Hash_Incremental_update(
       {
         sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i1));
       }
-      if (!(sz1 == (uint32_t)0U))
+      if (!(sz1 == 0U))
       {
         uint64_t prevlen = total_len1 - (uint64_t)sz1;
         update_multi(block_state1, prevlen, buf, block_len(i1));
@@ -1542,33 +1540,33 @@ EverCrypt_Hash_Incremental_update(
       uint32_t ite0;
       if
       (
-        (uint64_t)(len - diff)
+        (uint64_t)(chunk_len - diff)
         % (uint64_t)block_len(i1)
-        == (uint64_t)0U
-        && (uint64_t)(len - diff) > (uint64_t)0U
+        == 0ULL
+        && (uint64_t)(chunk_len - diff) > 0ULL
       )
       {
         ite0 = block_len(i1);
       }
       else
       {
-        ite0 = (uint32_t)((uint64_t)(len - diff) % (uint64_t)block_len(i1));
+        ite0 = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)block_len(i1));
       }
-      uint32_t n_blocks = (len - diff - ite0) / block_len(i1);
+      uint32_t n_blocks = (chunk_len - diff - ite0) / block_len(i1);
       uint32_t data1_len = n_blocks * block_len(i1);
-      uint32_t data2_len = len - diff - data1_len;
-      uint8_t *data11 = data2;
-      uint8_t *data21 = data2 + data1_len;
-      update_multi(block_state1, total_len1, data11, data1_len);
+      uint32_t data2_len = chunk_len - diff - data1_len;
+      uint8_t *data1 = chunk2;
+      uint8_t *data2 = chunk2 + data1_len;
+      update_multi(block_state1, total_len1, data1, data1_len);
       uint8_t *dst = buf;
-      memcpy(dst, data21, data2_len * sizeof (uint8_t));
-      *s
+      memcpy(dst, data2, data2_len * sizeof (uint8_t));
+      *state
       =
         (
-          (EverCrypt_Hash_Incremental_hash_state){
+          (EverCrypt_Hash_Incremental_state_t){
             .block_state = block_state1,
             .buf = buf,
-            .total_len = total_len1 + (uint64_t)(len - diff)
+            .total_len = total_len1 + (uint64_t)(chunk_len - diff)
           }
         );
     }
@@ -1592,20 +1590,14 @@ EverCrypt_Hash_Incremental_update(
   }
 }
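
For reference on the bound computed in `sw` above: it is 2^61 - 1 bytes for MD5, SHA-1, SHA-224 and SHA-256 and 2^64 - 1 bytes for the other algorithms, and exceeding it is reported through the return code without touching the state. A hedged caller-side sketch, with `st` a previously allocated state, `chunk`/`chunk_len` as in the parameters above, and assuming the usual `EverCrypt_Error_Success` constant from EverCrypt_Error.h:

EverCrypt_Error_error_code err = EverCrypt_Hash_Incremental_update(st, chunk, chunk_len);
if (err != EverCrypt_Error_Success)
{
  /* The accumulated input would exceed the algorithm's length bound;
     per the check above, the state was not modified and no data was hashed. */
}
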
 
-static void finish_md5(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_md5(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_MD5)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_MD5) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_MD5);
   }
@@ -1620,7 +1612,7 @@ static void finish_md5(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_MD5) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_MD5) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_MD5);
   }
@@ -1630,26 +1622,20 @@ static void finish_md5(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha1(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha1(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA1)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA1) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA1);
   }
@@ -1664,7 +1650,7 @@ static void finish_sha1(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA1) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA1) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA1);
   }
@@ -1674,26 +1660,21 @@ static void finish_sha1(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha224(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_224)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_224) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_224);
   }
@@ -1708,7 +1689,7 @@ static void finish_sha224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_224) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_224) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_224);
   }
@@ -1718,26 +1699,21 @@ static void finish_sha224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha256(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_256)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_256) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_256);
   }
@@ -1752,7 +1728,7 @@ static void finish_sha256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_256) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_256) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_256);
   }
@@ -1762,26 +1738,21 @@ static void finish_sha256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha3_224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha3_224(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_224)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_224) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_224);
   }
@@ -1796,7 +1767,7 @@ static void finish_sha3_224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_224) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_224) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_224);
   }
@@ -1806,26 +1777,21 @@ static void finish_sha3_224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha3_256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha3_256(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_256)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_256) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_256);
   }
@@ -1840,7 +1806,7 @@ static void finish_sha3_256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_256) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_256) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_256);
   }
@@ -1850,26 +1816,21 @@ static void finish_sha3_256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha3_384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha3_384(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_384)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_384) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_384);
   }
@@ -1884,7 +1845,7 @@ static void finish_sha3_384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_384) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_384) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_384);
   }
@@ -1894,26 +1855,21 @@ static void finish_sha3_384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha3_512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha3_512(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_512)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_512) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_512);
   }
@@ -1928,7 +1884,7 @@ static void finish_sha3_512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_512) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_512) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_512);
   }
@@ -1938,26 +1894,21 @@ static void finish_sha3_512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha384(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_384)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_384) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_384);
   }
@@ -1972,7 +1923,7 @@ static void finish_sha384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_384) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_384) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_384);
   }
@@ -1982,26 +1933,21 @@ static void finish_sha384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha512(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_512)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_512) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_512);
   }
@@ -2016,7 +1962,7 @@ static void finish_sha512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_512) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_512) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_512);
   }
@@ -2026,26 +1972,20 @@ static void finish_sha512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_blake2s(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_blake2s(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_Blake2S)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_Blake2S) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_Blake2S);
   }
@@ -2075,7 +2015,7 @@ static void finish_blake2s(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_Blake2S) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_Blake2S) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_Blake2S);
   }
@@ -2085,26 +2025,20 @@ static void finish_blake2s(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_blake2b(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_blake2b(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_Blake2B)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_Blake2B) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_Blake2B);
   }
@@ -2134,7 +2068,7 @@ static void finish_blake2b(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_Blake2B) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_Blake2B) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_Blake2B);
   }
@@ -2144,93 +2078,94 @@ static void finish_blake2b(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
 /**
 Perform a run-time test to determine which algorithm was chosen for the given piece of state.
 */
 Spec_Hash_Definitions_hash_alg
-EverCrypt_Hash_Incremental_alg_of_state(EverCrypt_Hash_Incremental_hash_state *s)
+EverCrypt_Hash_Incremental_alg_of_state(EverCrypt_Hash_Incremental_state_t *s)
 {
   EverCrypt_Hash_state_s *block_state = (*s).block_state;
   return alg_of_state(block_state);
 }
 
 /**
-Write the resulting hash into `dst`, an array whose length is
+Write the resulting hash into `output`, an array whose length is
 algorithm-specific. You can use the macros defined earlier in this file to
 allocate a destination buffer of the right length. The state remains valid after
-a call to `finish`, meaning the user may feed more data into the hash via
+a call to `digest`, meaning the user may feed more data into the hash via
 `update`. (The finish function operates on an internal copy of the state and
 therefore does not invalidate the client-held state.)
 */
-void EverCrypt_Hash_Incremental_finish(EverCrypt_Hash_Incremental_hash_state *s, uint8_t *dst)
+void
+EverCrypt_Hash_Incremental_digest(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  Spec_Hash_Definitions_hash_alg a1 = EverCrypt_Hash_Incremental_alg_of_state(s);
+  Spec_Hash_Definitions_hash_alg a1 = EverCrypt_Hash_Incremental_alg_of_state(state);
   switch (a1)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        finish_md5(s, dst);
+        digest_md5(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        finish_sha1(s, dst);
+        digest_sha1(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        finish_sha224(s, dst);
+        digest_sha224(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        finish_sha256(s, dst);
+        digest_sha256(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        finish_sha384(s, dst);
+        digest_sha384(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        finish_sha512(s, dst);
+        digest_sha512(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        finish_sha3_224(s, dst);
+        digest_sha3_224(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        finish_sha3_256(s, dst);
+        digest_sha3_256(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        finish_sha3_384(s, dst);
+        digest_sha3_384(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        finish_sha3_512(s, dst);
+        digest_sha3_512(state, output);
         break;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        finish_blake2s(s, dst);
+        digest_blake2s(state, output);
         break;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        finish_blake2b(s, dst);
+        digest_blake2b(state, output);
         break;
       }
     default:
@@ -2244,38 +2179,38 @@ void EverCrypt_Hash_Incremental_finish(EverCrypt_Hash_Incremental_hash_state *s,
 /**
 Free a state previously allocated with `create_in`.
 */
-void EverCrypt_Hash_Incremental_free(EverCrypt_Hash_Incremental_hash_state *s)
+void EverCrypt_Hash_Incremental_free(EverCrypt_Hash_Incremental_state_t *state)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *s;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   uint8_t *buf = scrut.buf;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   free_(block_state);
   KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
+  KRML_HOST_FREE(state);
 }
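
As an editorial aid (not part of the patch): a minimal caller sketch for the renamed incremental API, using only signatures visible in this hunk (`alg_of_state`, `digest`, `free`). It assumes the state was previously allocated and fed through the allocation/`update` functions referenced in the comments above, which this hunk does not show.

#include <stdbool.h>
#include <stdint.h>
#include "Hacl_Spec.h"
#include "EverCrypt_Hash.h"

/* Reads an intermediate SHA-256 digest; `digest` works on an internal copy,
   so `st` stays valid and may keep receiving data. The state is only freed
   when the caller signals it is done. */
static void checkpoint_sha256(EverCrypt_Hash_Incremental_state_t *st,
                              uint8_t out[32], bool done)
{
  if (EverCrypt_Hash_Incremental_alg_of_state(st) == Spec_Hash_Definitions_SHA2_256)
    EverCrypt_Hash_Incremental_digest(st, out);
  if (done)
    EverCrypt_Hash_Incremental_free(st);
}
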
 
-void EverCrypt_Hash_Incremental_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void EverCrypt_Hash_Incremental_hash_256(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
-    uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
+    uint32_t x = Hacl_Hash_SHA2_h256[i];
     os[i] = x;);
   uint32_t *s = st;
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -2285,35 +2220,35 @@ void EverCrypt_Hash_Incremental_hash_256(uint8_t *input, uint32_t input_len, uin
   uint32_t rest_len = rest_len0;
   uint8_t *rest = rest0;
   EverCrypt_Hash_update_multi_256(s, blocks, blocks_n);
-  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)blocks_len + (uint64_t)rest_len,
+  Hacl_Hash_SHA2_sha256_update_last((uint64_t)blocks_len + (uint64_t)rest_len,
     rest_len,
     rest,
     s);
-  Hacl_SHA2_Scalar32_sha256_finish(s, dst);
+  Hacl_Hash_SHA2_sha256_finish(s, output);
 }
 
-static void hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst)
+static void hash_224(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
-    uint32_t x = Hacl_Impl_SHA2_Generic_h224[i];
+    uint32_t x = Hacl_Hash_SHA2_h224[i];
     os[i] = x;);
   uint32_t *s = st;
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -2323,15 +2258,15 @@ static void hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst)
   uint32_t rest_len = rest_len0;
   uint8_t *rest = rest0;
   EverCrypt_Hash_update_multi_256(s, blocks, blocks_n);
-  Hacl_SHA2_Scalar32_sha224_update_last((uint64_t)blocks_len + (uint64_t)rest_len,
+  Hacl_Hash_SHA2_sha224_update_last((uint64_t)blocks_len + (uint64_t)rest_len,
     rest_len,
     rest,
     s);
-  Hacl_SHA2_Scalar32_sha224_finish(s, dst);
+  Hacl_Hash_SHA2_sha224_finish(s, output);
 }
 
 /**
-Hash `input`, of len `len`, into `dst`, an array whose length is determined by
+Hash `input`, of length `input_len`, into `output`, an array whose length is determined by
 your choice of algorithm `a` (see Hacl_Spec.h). You can use the macros defined
 earlier in this file to allocate a destination buffer of the right length. This
 API will automatically pick the most efficient implementation, provided you have
@@ -2340,61 +2275,61 @@ called EverCrypt_AutoConfig2_init() before.
 void
 EverCrypt_Hash_Incremental_hash(
   Spec_Hash_Definitions_hash_alg a,
-  uint8_t *dst,
+  uint8_t *output,
   uint8_t *input,
-  uint32_t len
+  uint32_t input_len
 )
 {
   switch (a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        Hacl_Hash_MD5_legacy_hash(input, len, dst);
+        Hacl_Hash_MD5_hash_oneshot(output, input, input_len);
         break;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        Hacl_Hash_SHA1_legacy_hash(input, len, dst);
+        Hacl_Hash_SHA1_hash_oneshot(output, input, input_len);
         break;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        hash_224(input, len, dst);
+        hash_224(output, input, input_len);
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        EverCrypt_Hash_Incremental_hash_256(input, len, dst);
+        EverCrypt_Hash_Incremental_hash_256(output, input, input_len);
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        Hacl_Streaming_SHA2_hash_384(input, len, dst);
+        Hacl_Hash_SHA2_hash_384(output, input, input_len);
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        Hacl_Streaming_SHA2_hash_512(input, len, dst);
+        Hacl_Hash_SHA2_hash_512(output, input, input_len);
         break;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        Hacl_SHA3_sha3_224(len, input, dst);
+        Hacl_Hash_SHA3_sha3_224(input_len, input, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        Hacl_SHA3_sha3_256(len, input, dst);
+        Hacl_Hash_SHA3_sha3_256(input_len, input, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        Hacl_SHA3_sha3_384(len, input, dst);
+        Hacl_Hash_SHA3_sha3_384(input_len, input, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        Hacl_SHA3_sha3_512(len, input, dst);
+        Hacl_Hash_SHA3_sha3_512(input_len, input, output);
         break;
       }
     case Spec_Hash_Definitions_Blake2S:
@@ -2403,12 +2338,12 @@ EverCrypt_Hash_Incremental_hash(
         bool vec128 = EverCrypt_AutoConfig2_has_vec128();
         if (vec128)
         {
-          Hacl_Blake2s_128_blake2s((uint32_t)32U, dst, len, input, (uint32_t)0U, NULL);
+          Hacl_Hash_Blake2s_Simd128_hash_with_key(output, 32U, input, input_len, NULL, 0U);
           return;
         }
-        Hacl_Blake2s_32_blake2s((uint32_t)32U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Hash_Blake2s_hash_with_key(output, 32U, input, input_len, NULL, 0U);
         #else
-        Hacl_Blake2s_32_blake2s((uint32_t)32U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Hash_Blake2s_hash_with_key(output, 32U, input, input_len, NULL, 0U);
         #endif
         break;
       }
@@ -2418,12 +2353,12 @@ EverCrypt_Hash_Incremental_hash(
         bool vec256 = EverCrypt_AutoConfig2_has_vec256();
         if (vec256)
         {
-          Hacl_Blake2b_256_blake2b((uint32_t)64U, dst, len, input, (uint32_t)0U, NULL);
+          Hacl_Hash_Blake2b_Simd256_hash_with_key(output, 64U, input, input_len, NULL, 0U);
           return;
         }
-        Hacl_Blake2b_32_blake2b((uint32_t)64U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Hash_Blake2b_hash_with_key(output, 64U, input, input_len, NULL, 0U);
         #else
-        Hacl_Blake2b_32_blake2b((uint32_t)64U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Hash_Blake2b_hash_with_key(output, 64U, input, input_len, NULL, 0U);
         #endif
         break;
       }
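
A hypothetical usage sketch (editorial, not part of the patch) for the agile one-shot entry point above, with the new output-first argument order; it assumes EverCrypt_AutoConfig2_init() has already been called, as the documentation requires.

#include <stdint.h>
#include "Hacl_Spec.h"
#include "EverCrypt_Hash.h"

/* SHA2-512 produces a 64-byte digest; the dispatcher picks the most
   efficient available implementation at run time. */
static void sha512_oneshot(uint8_t out[64], uint8_t *msg, uint32_t msg_len)
{
  EverCrypt_Hash_Incremental_hash(Spec_Hash_Definitions_SHA2_512, out, msg, msg_len);
}
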
diff --git a/src/EverCrypt_Poly1305.c b/src/EverCrypt_Poly1305.c
index 454c0fce..33ee20f3 100644
--- a/src/EverCrypt_Poly1305.c
+++ b/src/EverCrypt_Poly1305.c
@@ -31,60 +31,60 @@
 KRML_MAYBE_UNUSED static void
 poly1305_vale(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key)
 {
-  KRML_HOST_IGNORE(dst);
-  KRML_HOST_IGNORE(src);
-  KRML_HOST_IGNORE(len);
-  KRML_HOST_IGNORE(key);
+  KRML_MAYBE_UNUSED_VAR(dst);
+  KRML_MAYBE_UNUSED_VAR(src);
+  KRML_MAYBE_UNUSED_VAR(len);
+  KRML_MAYBE_UNUSED_VAR(key);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ctx[192U] = { 0U };
-  memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t));
-  uint32_t n_blocks = len / (uint32_t)16U;
-  uint32_t n_extra = len % (uint32_t)16U;
+  memcpy(ctx + 24U, key, 32U * sizeof (uint8_t));
+  uint32_t n_blocks = len / 16U;
+  uint32_t n_extra = len % 16U;
   uint8_t tmp[16U] = { 0U };
-  if (n_extra == (uint32_t)0U)
+  if (n_extra == 0U)
   {
-    KRML_HOST_IGNORE(x64_poly1305(ctx, src, (uint64_t)len, (uint64_t)1U));
+    x64_poly1305(ctx, src, (uint64_t)len, 1ULL);
   }
   else
   {
-    uint32_t len16 = n_blocks * (uint32_t)16U;
+    uint32_t len16 = n_blocks * 16U;
     uint8_t *src16 = src;
     memcpy(tmp, src + len16, n_extra * sizeof (uint8_t));
-    KRML_HOST_IGNORE(x64_poly1305(ctx, src16, (uint64_t)len16, (uint64_t)0U));
-    memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(x64_poly1305(ctx, tmp, (uint64_t)n_extra, (uint64_t)1U));
+    x64_poly1305(ctx, src16, (uint64_t)len16, 0ULL);
+    memcpy(ctx + 24U, key, 32U * sizeof (uint8_t));
+    x64_poly1305(ctx, tmp, (uint64_t)n_extra, 1ULL);
   }
-  memcpy(dst, ctx, (uint32_t)16U * sizeof (uint8_t));
+  memcpy(dst, ctx, 16U * sizeof (uint8_t));
   #endif
 }
 
-void EverCrypt_Poly1305_poly1305(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key)
+void EverCrypt_Poly1305_mac(uint8_t *output, uint8_t *input, uint32_t input_len, uint8_t *key)
 {
   bool vec256 = EverCrypt_AutoConfig2_has_vec256();
   bool vec128 = EverCrypt_AutoConfig2_has_vec128();
   #if HACL_CAN_COMPILE_VEC256
   if (vec256)
   {
-    KRML_HOST_IGNORE(vec128);
-    Hacl_Poly1305_256_poly1305_mac(dst, len, src, key);
+    KRML_MAYBE_UNUSED_VAR(vec128);
+    Hacl_MAC_Poly1305_Simd256_mac(output, input, input_len, key);
     return;
   }
   #endif
   #if HACL_CAN_COMPILE_VEC128
   if (vec128)
   {
-    KRML_HOST_IGNORE(vec256);
-    Hacl_Poly1305_128_poly1305_mac(dst, len, src, key);
+    KRML_MAYBE_UNUSED_VAR(vec256);
+    Hacl_MAC_Poly1305_Simd128_mac(output, input, input_len, key);
     return;
   }
   #endif
-  KRML_HOST_IGNORE(vec256);
-  KRML_HOST_IGNORE(vec128);
+  KRML_MAYBE_UNUSED_VAR(vec256);
+  KRML_MAYBE_UNUSED_VAR(vec128);
   #if HACL_CAN_COMPILE_VALE
-  poly1305_vale(dst, src, len, key);
+  poly1305_vale(output, input, input_len, key);
   #else
   KRML_HOST_IGNORE(poly1305_vale);
-  Hacl_Poly1305_32_poly1305_mac(dst, len, src, key);
+  Hacl_MAC_Poly1305_mac(output, input, input_len, key);
   #endif
 }
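
A minimal caller sketch (editorial, not part of the patch) for the renamed multiplexed MAC; buffer sizes follow Poly1305's 32-byte one-time key and 16-byte tag, and CPU feature detection is assumed to have been initialized via EverCrypt_AutoConfig2_init().

#include <stdint.h>
#include "EverCrypt_Poly1305.h"

/* Computes a 16-byte Poly1305 tag over `msg`; the implementation
   (SIMD256, SIMD128, Vale, or portable C) is chosen at run time. */
static void poly1305_tag(uint8_t tag[16], uint8_t *msg, uint32_t msg_len,
                         uint8_t key[32])
{
  EverCrypt_Poly1305_mac(tag, msg, msg_len, key);
}
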
 
diff --git a/src/Hacl_Chacha20Poly1305_32.c b/src/Hacl_AEAD_Chacha20Poly1305.c
similarity index 70%
rename from src/Hacl_Chacha20Poly1305_32.c
rename to src/Hacl_AEAD_Chacha20Poly1305.c
index 179af485..310c84fc 100644
--- a/src/Hacl_Chacha20Poly1305_32.c
+++ b/src/Hacl_AEAD_Chacha20Poly1305.c
@@ -23,35 +23,36 @@
  */
 
 
-#include "Hacl_Chacha20Poly1305_32.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 
+#include "internal/Hacl_MAC_Poly1305.h"
 #include "internal/Hacl_Krmllib.h"
 
 static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
 {
-  uint32_t n = len / (uint32_t)16U;
-  uint32_t r = len % (uint32_t)16U;
+  uint32_t n = len / 16U;
+  uint32_t r = len % 16U;
   uint8_t *blocks = text;
-  uint8_t *rem = text + n * (uint32_t)16U;
-  uint64_t *pre0 = ctx + (uint32_t)5U;
+  uint8_t *rem = text + n * 16U;
+  uint64_t *pre0 = ctx + 5U;
   uint64_t *acc0 = ctx;
-  uint32_t nb = n * (uint32_t)16U / (uint32_t)16U;
-  uint32_t rem1 = n * (uint32_t)16U % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = n * 16U / 16U;
+  uint32_t rem1 = n * 16U % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = blocks + i * (uint32_t)16U;
+    uint8_t *block = blocks + i * 16U;
     uint64_t e[5U] = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -62,12 +63,12 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     uint64_t mask = b;
     uint64_t f4 = e[4U];
     e[4U] = f4 | mask;
     uint64_t *r1 = pre0;
-    uint64_t *r5 = pre0 + (uint32_t)5U;
+    uint64_t *r5 = pre0 + 5U;
     uint64_t r0 = r1[0U];
     uint64_t r11 = r1[1U];
     uint64_t r2 = r1[2U];
@@ -122,28 +123,28 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -157,23 +158,23 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     acc0[3U] = o3;
     acc0[4U] = o4;
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *last = blocks + nb * (uint32_t)16U;
+    uint8_t *last = blocks + nb * 16U;
     uint64_t e[5U] = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem1 * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -184,12 +185,12 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem1 * 8U % 26U;
     uint64_t mask = b;
-    uint64_t fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
-    e[rem1 * (uint32_t)8U / (uint32_t)26U] = fi | mask;
+    uint64_t fi = e[rem1 * 8U / 26U];
+    e[rem1 * 8U / 26U] = fi | mask;
     uint64_t *r1 = pre0;
-    uint64_t *r5 = pre0 + (uint32_t)5U;
+    uint64_t *r5 = pre0 + 5U;
     uint64_t r0 = r1[0U];
     uint64_t r11 = r1[1U];
     uint64_t r2 = r1[2U];
@@ -244,28 +245,28 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -281,22 +282,22 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
   }
   uint8_t tmp[16U] = { 0U };
   memcpy(tmp, rem, r * sizeof (uint8_t));
-  if (r > (uint32_t)0U)
+  if (r > 0U)
   {
-    uint64_t *pre = ctx + (uint32_t)5U;
+    uint64_t *pre = ctx + 5U;
     uint64_t *acc = ctx;
     uint64_t e[5U] = { 0U };
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -307,12 +308,12 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     uint64_t mask = b;
     uint64_t f4 = e[4U];
     e[4U] = f4 | mask;
     uint64_t *r1 = pre;
-    uint64_t *r5 = pre + (uint32_t)5U;
+    uint64_t *r5 = pre + 5U;
     uint64_t r0 = r1[0U];
     uint64_t r11 = r1[1U];
     uint64_t r2 = r1[2U];
@@ -367,28 +368,28 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -417,31 +418,31 @@ poly1305_do_32(
 {
   uint64_t ctx[25U] = { 0U };
   uint8_t block[16U] = { 0U };
-  Hacl_Poly1305_32_poly1305_init(ctx, k);
-  if (aadlen != (uint32_t)0U)
+  Hacl_MAC_Poly1305_poly1305_init(ctx, k);
+  if (aadlen != 0U)
   {
     poly1305_padded_32(ctx, aadlen, aad);
   }
-  if (mlen != (uint32_t)0U)
+  if (mlen != 0U)
   {
     poly1305_padded_32(ctx, mlen, m);
   }
   store64_le(block, (uint64_t)aadlen);
-  store64_le(block + (uint32_t)8U, (uint64_t)mlen);
-  uint64_t *pre = ctx + (uint32_t)5U;
+  store64_le(block + 8U, (uint64_t)mlen);
+  uint64_t *pre = ctx + 5U;
   uint64_t *acc = ctx;
   uint64_t e[5U] = { 0U };
   uint64_t u0 = load64_le(block);
   uint64_t lo = u0;
-  uint64_t u = load64_le(block + (uint32_t)8U);
+  uint64_t u = load64_le(block + 8U);
   uint64_t hi = u;
   uint64_t f0 = lo;
   uint64_t f1 = hi;
-  uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-  uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-  uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-  uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-  uint64_t f40 = f1 >> (uint32_t)40U;
+  uint64_t f010 = f0 & 0x3ffffffULL;
+  uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+  uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+  uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+  uint64_t f40 = f1 >> 40U;
   uint64_t f01 = f010;
   uint64_t f111 = f110;
   uint64_t f2 = f20;
@@ -452,12 +453,12 @@ poly1305_do_32(
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   uint64_t mask = b;
   uint64_t f4 = e[4U];
   e[4U] = f4 | mask;
   uint64_t *r = pre;
-  uint64_t *r5 = pre + (uint32_t)5U;
+  uint64_t *r5 = pre + 5U;
   uint64_t r0 = r[0U];
   uint64_t r1 = r[1U];
   uint64_t r2 = r[2U];
@@ -512,28 +513,28 @@ poly1305_do_32(
   uint64_t t2 = a26;
   uint64_t t3 = a36;
   uint64_t t4 = a46;
-  uint64_t mask26 = (uint64_t)0x3ffffffU;
-  uint64_t z0 = t0 >> (uint32_t)26U;
-  uint64_t z1 = t3 >> (uint32_t)26U;
+  uint64_t mask26 = 0x3ffffffULL;
+  uint64_t z0 = t0 >> 26U;
+  uint64_t z1 = t3 >> 26U;
   uint64_t x0 = t0 & mask26;
   uint64_t x3 = t3 & mask26;
   uint64_t x1 = t1 + z0;
   uint64_t x4 = t4 + z1;
-  uint64_t z01 = x1 >> (uint32_t)26U;
-  uint64_t z11 = x4 >> (uint32_t)26U;
-  uint64_t t = z11 << (uint32_t)2U;
+  uint64_t z01 = x1 >> 26U;
+  uint64_t z11 = x4 >> 26U;
+  uint64_t t = z11 << 2U;
   uint64_t z12 = z11 + t;
   uint64_t x11 = x1 & mask26;
   uint64_t x41 = x4 & mask26;
   uint64_t x2 = t2 + z01;
   uint64_t x01 = x0 + z12;
-  uint64_t z02 = x2 >> (uint32_t)26U;
-  uint64_t z13 = x01 >> (uint32_t)26U;
+  uint64_t z02 = x2 >> 26U;
+  uint64_t z13 = x01 >> 26U;
   uint64_t x21 = x2 & mask26;
   uint64_t x02 = x01 & mask26;
   uint64_t x31 = x3 + z02;
   uint64_t x12 = x11 + z13;
-  uint64_t z03 = x31 >> (uint32_t)26U;
+  uint64_t z03 = x31 >> 26U;
   uint64_t x32 = x31 & mask26;
   uint64_t x42 = x41 + z03;
   uint64_t o0 = x02;
@@ -546,42 +547,41 @@ poly1305_do_32(
   acc[2U] = o2;
   acc[3U] = o3;
   acc[4U] = o4;
-  Hacl_Poly1305_32_poly1305_finish(out, k, ctx);
+  Hacl_MAC_Poly1305_poly1305_finish(out, k, ctx);
 }
 
 /**
-Encrypt a message `m` with key `k`.
+Encrypt a message `input` with key `key`.
 
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption and decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
 
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the message.
-@param m Pointer to `mlen` bytes of memory where the message is read from.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is written to.
-@param mac Pointer to 16 bytes of memory where the mac is written to.
+@param output Pointer to `input_len` bytes of memory where the ciphertext is written to.
+@param tag Pointer to 16 bytes of memory where the mac is written to.
+@param input Pointer to `input_len` bytes of memory where the message is read from.
+@param input_len Length of the message.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
 */
 void
-Hacl_Chacha20Poly1305_32_aead_encrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_encrypt(
+  uint8_t *output,
+  uint8_t *tag,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce
 )
 {
-  Hacl_Chacha20_chacha20_encrypt(mlen, cipher, m, k, n, (uint32_t)1U);
+  Hacl_Chacha20_chacha20_encrypt(input_len, output, input, key, nonce, 1U);
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
-  uint8_t *key = tmp;
-  poly1305_do_32(key, aadlen, aad, mlen, cipher, mac);
+  Hacl_Chacha20_chacha20_encrypt(64U, tmp, tmp, key, nonce, 0U);
+  uint8_t *key1 = tmp;
+  poly1305_do_32(key1, data_len, data, input_len, output, tag);
 }
 
 /**
@@ -606,35 +606,35 @@ If decryption fails, the array `m` remains unchanged and the function returns th
 @returns 0 on success; 1 on failure.
 */
 uint32_t
-Hacl_Chacha20Poly1305_32_aead_decrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_decrypt(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce,
+  uint8_t *tag
 )
 {
-  uint8_t computed_mac[16U] = { 0U };
+  uint8_t computed_tag[16U] = { 0U };
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
-  uint8_t *key = tmp;
-  poly1305_do_32(key, aadlen, aad, mlen, cipher, computed_mac);
-  uint8_t res = (uint8_t)255U;
+  Hacl_Chacha20_chacha20_encrypt(64U, tmp, tmp, key, nonce, 0U);
+  uint8_t *key1 = tmp;
+  poly1305_do_32(key1, data_len, data, input_len, input, computed_tag);
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
-    res = uu____0 & res;);
+    0U,
+    16U,
+    1U,
+    uint8_t uu____0 = FStar_UInt8_eq_mask(computed_tag[i], tag[i]);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
-    Hacl_Chacha20_chacha20_encrypt(mlen, m, cipher, k, n, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Chacha20_chacha20_encrypt(input_len, output, input, key, nonce, 1U);
+    return 0U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
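
A hypothetical round-trip sketch (editorial, not part of the patch) exercising the renamed portable AEAD entry points with the output-first parameter order documented above; buffer names are illustrative.

#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305.h"

/* Encrypts `pt` into `ct` with a fresh 16-byte tag, then decrypts back into
   `pt`; returns 0 if the tag verified, 1 otherwise. */
static uint32_t chacha20poly1305_roundtrip(uint8_t *pt, uint8_t *ct, uint32_t len,
                                           uint8_t *aad, uint32_t aad_len,
                                           uint8_t key[32], uint8_t nonce[12])
{
  uint8_t tag[16] = { 0U };
  Hacl_AEAD_Chacha20Poly1305_encrypt(ct, tag, pt, len, aad, aad_len, key, nonce);
  return Hacl_AEAD_Chacha20Poly1305_decrypt(pt, ct, len, aad, aad_len, key, nonce, tag);
}
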
 
diff --git a/src/Hacl_Chacha20Poly1305_128.c b/src/Hacl_AEAD_Chacha20Poly1305_Simd128.c
similarity index 77%
rename from src/Hacl_Chacha20Poly1305_128.c
rename to src/Hacl_AEAD_Chacha20Poly1305_Simd128.c
index 4cf2eae9..0cfa41fd 100644
--- a/src/Hacl_Chacha20Poly1305_128.c
+++ b/src/Hacl_AEAD_Chacha20Poly1305_Simd128.c
@@ -23,65 +23,60 @@
  */
 
 
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
 
-#include "internal/Hacl_Poly1305_128.h"
+#include "internal/Hacl_MAC_Poly1305_Simd128.h"
 #include "internal/Hacl_Krmllib.h"
 #include "libintvector.h"
 
 static inline void
 poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t *text)
 {
-  uint32_t n = len / (uint32_t)16U;
-  uint32_t r = len % (uint32_t)16U;
+  uint32_t n = len / 16U;
+  uint32_t r = len % 16U;
   uint8_t *blocks = text;
-  uint8_t *rem = text + n * (uint32_t)16U;
-  Lib_IntVector_Intrinsics_vec128 *pre0 = ctx + (uint32_t)5U;
+  uint8_t *rem = text + n * 16U;
+  Lib_IntVector_Intrinsics_vec128 *pre0 = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc0 = ctx;
-  uint32_t sz_block = (uint32_t)32U;
-  uint32_t len0 = n * (uint32_t)16U / sz_block * sz_block;
+  uint32_t sz_block = 32U;
+  uint32_t len0 = n * 16U / sz_block * sz_block;
   uint8_t *t00 = blocks;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)32U;
+    uint32_t bs = 32U;
     uint8_t *text0 = t00;
-    Hacl_Impl_Poly1305_Field32xN_128_load_acc2(acc0, text0);
+    Hacl_MAC_Poly1305_Simd128_load_acc2(acc0, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t00 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
       Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(block);
-      Lib_IntVector_Intrinsics_vec128
-      b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + (uint32_t)16U);
+      Lib_IntVector_Intrinsics_vec128 b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + 16U);
       Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       f00 =
         Lib_IntVector_Intrinsics_vec128_and(lo,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f15 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)26U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 26U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f25 =
-        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)52U),
+        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 52U),
           Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi,
-              Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-            (uint32_t)12U));
+              Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+            12U));
       Lib_IntVector_Intrinsics_vec128
       f30 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi,
-            (uint32_t)14U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-      Lib_IntVector_Intrinsics_vec128
-      f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U);
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 14U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+      Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 40U);
       Lib_IntVector_Intrinsics_vec128 f0 = f00;
       Lib_IntVector_Intrinsics_vec128 f1 = f15;
       Lib_IntVector_Intrinsics_vec128 f2 = f25;
@@ -92,12 +87,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
       e[2U] = f2;
       e[3U] = f3;
       e[4U] = f41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
       Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec128 *rn = pre0 + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec128 *rn5 = pre0 + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec128 *rn = pre0 + 10U;
+      Lib_IntVector_Intrinsics_vec128 *rn5 = pre0 + 15U;
       Lib_IntVector_Intrinsics_vec128 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec128 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec128 r2 = rn[2U];
@@ -202,37 +197,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
       Lib_IntVector_Intrinsics_vec128 t2 = a24;
       Lib_IntVector_Intrinsics_vec128 t3 = a34;
       Lib_IntVector_Intrinsics_vec128 t4 = a44;
-      Lib_IntVector_Intrinsics_vec128
-      mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec128
-      z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec128
-      z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
       Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec128
-      z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec128
-      z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec128 o00 = x02;
@@ -266,45 +252,41 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
       acc0[3U] = o3;
       acc0[4U] = o4;
     }
-    Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(acc0, pre0);
+    Hacl_MAC_Poly1305_Simd128_fmul_r2_normalize(acc0, pre0);
   }
-  uint32_t len1 = n * (uint32_t)16U - len0;
+  uint32_t len1 = n * 16U - len0;
   uint8_t *t10 = blocks + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem1 = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem1 = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t10 + i * (uint32_t)16U;
+    uint8_t *block = t10 + i * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -315,12 +297,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
     Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
     Lib_IntVector_Intrinsics_vec128 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec128 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r1[2U];
@@ -435,37 +417,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -479,41 +452,37 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     acc0[3U] = o3;
     acc0[4U] = o4;
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *last = t10 + nb * (uint32_t)16U;
+    uint8_t *last = t10 + nb * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem1 * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -524,12 +493,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem1 * 8U % 26U;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
-    Lib_IntVector_Intrinsics_vec128 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
-    e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec128 fi = e[rem1 * 8U / 26U];
+    e[rem1 * 8U / 26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
     Lib_IntVector_Intrinsics_vec128 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec128 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r1[2U];
@@ -644,37 +613,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -690,40 +650,36 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
   }
   uint8_t tmp[16U] = { 0U };
   memcpy(tmp, rem, r * sizeof (uint8_t));
-  if (r > (uint32_t)0U)
+  if (r > 0U)
   {
-    Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
     Lib_IntVector_Intrinsics_vec128 *acc = ctx;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -734,12 +690,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
     Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
     Lib_IntVector_Intrinsics_vec128 *r1 = pre;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec128 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r1[2U];
@@ -854,37 +810,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -913,49 +860,45 @@ poly1305_do_128(
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[25U] KRML_POST_ALIGN(16) = { 0U };
   uint8_t block[16U] = { 0U };
-  Hacl_Poly1305_128_poly1305_init(ctx, k);
-  if (aadlen != (uint32_t)0U)
+  Hacl_MAC_Poly1305_Simd128_poly1305_init(ctx, k);
+  if (aadlen != 0U)
   {
     poly1305_padded_128(ctx, aadlen, aad);
   }
-  if (mlen != (uint32_t)0U)
+  if (mlen != 0U)
   {
     poly1305_padded_128(ctx, mlen, m);
   }
   store64_le(block, (uint64_t)aadlen);
-  store64_le(block + (uint32_t)8U, (uint64_t)mlen);
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  store64_le(block + 8U, (uint64_t)mlen);
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
   uint64_t u0 = load64_le(block);
   uint64_t lo = u0;
-  uint64_t u = load64_le(block + (uint32_t)8U);
+  uint64_t u = load64_le(block + 8U);
   uint64_t hi = u;
   Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
   Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
   Lib_IntVector_Intrinsics_vec128
   f010 =
     Lib_IntVector_Intrinsics_vec128_and(f0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f110 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
   Lib_IntVector_Intrinsics_vec128 f01 = f010;
   Lib_IntVector_Intrinsics_vec128 f111 = f110;
   Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -966,12 +909,12 @@ poly1305_do_128(
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
   Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
   Lib_IntVector_Intrinsics_vec128 *r = pre;
-  Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
   Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -1086,37 +1029,28 @@ poly1305_do_128(
   Lib_IntVector_Intrinsics_vec128 t2 = a26;
   Lib_IntVector_Intrinsics_vec128 t3 = a36;
   Lib_IntVector_Intrinsics_vec128 t4 = a46;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -1129,95 +1063,93 @@ poly1305_do_128(
   acc[2U] = o2;
   acc[3U] = o3;
   acc[4U] = o4;
-  Hacl_Poly1305_128_poly1305_finish(out, k, ctx);
+  Hacl_MAC_Poly1305_Simd128_poly1305_finish(out, k, ctx);
 }
 
 /**
-Encrypt a message `m` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
+Encrypt a message `input` with key `key`.
 
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
 
-@param mlen Length of the message.
-@param m Pointer to `mlen` bytes of memory where the message is read from.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is written to.
-@param mac Pointer to 16 bytes of memory where the mac is written to.
+@param output Pointer to `input_len` bytes of memory where the ciphertext is written to.
+@param tag Pointer to 16 bytes of memory where the mac is written to.
+@param input Pointer to `input_len` bytes of memory where the message is read from.
+@param input_len Length of the message.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
 */
 void
-Hacl_Chacha20Poly1305_128_aead_encrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(
+  uint8_t *output,
+  uint8_t *tag,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce
 )
 {
-  Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, cipher, m, k, n, (uint32_t)1U);
+  Hacl_Chacha20_Vec128_chacha20_encrypt_128(input_len, output, input, key, nonce, 1U);
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
-  uint8_t *key = tmp;
-  poly1305_do_128(key, aadlen, aad, mlen, cipher, mac);
+  Hacl_Chacha20_Vec128_chacha20_encrypt_128(64U, tmp, tmp, key, nonce, 0U);
+  uint8_t *key1 = tmp;
+  poly1305_do_128(key1, data_len, data, input_len, output, tag);
 }
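/*
 * Illustrative sketch only (not part of the generated code): how a caller
 * might invoke the renamed SIMD128 AEAD encryption entry point above. Buffer
 * names and sizes are hypothetical, and the header name is assumed to match
 * the renamed Hacl_AEAD_Chacha20Poly1305_Simd128 module.
 */
#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h" /* assumed header for the renamed API */

static void example_aead_encrypt(void)
{
  uint8_t key[32U] = { 0U };   /* 32-byte AEAD key (use real key material) */
  uint8_t nonce[12U] = { 0U }; /* 12-byte nonce, unique per message */
  uint8_t aad[8U] = { 0U };    /* associated data: authenticated, not encrypted */
  uint8_t msg[64U] = { 0U };   /* plaintext */
  uint8_t ct[64U];             /* ciphertext; same length as the plaintext */
  uint8_t tag[16U];            /* 16-byte authentication tag */

  /* New argument order: output, tag, input, input_len, data, data_len, key, nonce. */
  Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(ct, tag, msg, 64U, aad, 8U, key, nonce);
}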
 
 /**
-Decrypt a ciphertext `cipher` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
+Decrypt a ciphertext `input` with key `key`.
 
-If decryption succeeds, the resulting plaintext is stored in `m` and the function returns the success code 0.
-If decryption fails, the array `m` remains unchanged and the function returns the error code 1.
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
 
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
+If decryption succeeds, the resulting plaintext is stored in `output` and the function returns the success code 0.
+If decryption fails, the array `output` remains unchanged and the function returns the error code 1.
 
-@param mlen Length of the ciphertext.
-@param m Pointer to `mlen` bytes of memory where the message is written to.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is read from.
-@param mac Pointer to 16 bytes of memory where the mac is read from.
+@param output Pointer to `input_len` bytes of memory where the message is written to.
+@param input Pointer to `input_len` bytes of memory where the ciphertext is read from.
+@param input_len Length of the ciphertext.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
+@param tag Pointer to 16 bytes of memory where the mac is read from.
 
 @returns 0 on success; 1 on failure.
 */
 uint32_t
-Hacl_Chacha20Poly1305_128_aead_decrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce,
+  uint8_t *tag
 )
 {
-  uint8_t computed_mac[16U] = { 0U };
+  uint8_t computed_tag[16U] = { 0U };
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
-  uint8_t *key = tmp;
-  poly1305_do_128(key, aadlen, aad, mlen, cipher, computed_mac);
-  uint8_t res = (uint8_t)255U;
+  Hacl_Chacha20_Vec128_chacha20_encrypt_128(64U, tmp, tmp, key, nonce, 0U);
+  uint8_t *key1 = tmp;
+  poly1305_do_128(key1, data_len, data, input_len, input, computed_tag);
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
-    res = uu____0 & res;);
+    0U,
+    16U,
+    1U,
+    uint8_t uu____0 = FStar_UInt8_eq_mask(computed_tag[i], tag[i]);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
-    Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, m, cipher, k, n, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Chacha20_Vec128_chacha20_encrypt_128(input_len, output, input, key, nonce, 1U);
+    return 0U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
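/*
 * Illustrative sketch only (not part of the generated code): verifying and
 * decrypting with the renamed SIMD128 AEAD decryption entry point above.
 * The buffers are assumed to come from the encryption sketch; names and the
 * wrapper itself are hypothetical.
 */
static int example_aead_decrypt(uint8_t *pt, uint8_t *ct, uint32_t ct_len,
                                uint8_t *aad, uint32_t aad_len,
                                uint8_t *key, uint8_t *nonce, uint8_t *tag)
{
  /* Returns 0 when the tag verifies (plaintext written to pt), 1 otherwise. */
  uint32_t res =
    Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(pt, ct, ct_len, aad, aad_len, key, nonce, tag);
  return res == 0U ? 0 : -1;
}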
 
diff --git a/src/msvc/Hacl_Chacha20Poly1305_256.c b/src/Hacl_AEAD_Chacha20Poly1305_Simd256.c
similarity index 77%
rename from src/msvc/Hacl_Chacha20Poly1305_256.c
rename to src/Hacl_AEAD_Chacha20Poly1305_Simd256.c
index c3dfec03..28414516 100644
--- a/src/msvc/Hacl_Chacha20Poly1305_256.c
+++ b/src/Hacl_AEAD_Chacha20Poly1305_Simd256.c
@@ -23,67 +23,61 @@
  */
 
 
-#include "Hacl_Chacha20Poly1305_256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
 
-#include "internal/Hacl_Poly1305_256.h"
+#include "internal/Hacl_MAC_Poly1305_Simd256.h"
 #include "internal/Hacl_Krmllib.h"
 #include "libintvector.h"
 
 static inline void
 poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t *text)
 {
-  uint32_t n = len / (uint32_t)16U;
-  uint32_t r = len % (uint32_t)16U;
+  uint32_t n = len / 16U;
+  uint32_t r = len % 16U;
   uint8_t *blocks = text;
-  uint8_t *rem = text + n * (uint32_t)16U;
-  Lib_IntVector_Intrinsics_vec256 *pre0 = ctx + (uint32_t)5U;
+  uint8_t *rem = text + n * 16U;
+  Lib_IntVector_Intrinsics_vec256 *pre0 = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc0 = ctx;
-  uint32_t sz_block = (uint32_t)64U;
-  uint32_t len0 = n * (uint32_t)16U / sz_block * sz_block;
+  uint32_t sz_block = 64U;
+  uint32_t len0 = n * 16U / sz_block * sz_block;
   uint8_t *t00 = blocks;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)64U;
+    uint32_t bs = 64U;
     uint8_t *text0 = t00;
-    Hacl_Impl_Poly1305_Field32xN_256_load_acc4(acc0, text0);
+    Hacl_MAC_Poly1305_Simd256_load_acc4(acc0, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t00 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
       Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(block);
+      Lib_IntVector_Intrinsics_vec256 hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + 32U);
       Lib_IntVector_Intrinsics_vec256
-      hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + (uint32_t)32U);
-      Lib_IntVector_Intrinsics_vec256
-      mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
+      mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
       Lib_IntVector_Intrinsics_vec256
       m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
       Lib_IntVector_Intrinsics_vec256
       m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi);
-      Lib_IntVector_Intrinsics_vec256
-      m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U);
-      Lib_IntVector_Intrinsics_vec256
-      m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U);
+      Lib_IntVector_Intrinsics_vec256 m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, 48U);
+      Lib_IntVector_Intrinsics_vec256 m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, 48U);
       Lib_IntVector_Intrinsics_vec256
       m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t010 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t30 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
-      Lib_IntVector_Intrinsics_vec256
-      t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)4U);
+      Lib_IntVector_Intrinsics_vec256 t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 4U);
       Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask260);
       Lib_IntVector_Intrinsics_vec256
-      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, (uint32_t)26U);
+      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, 26U);
       Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t10, mask260);
       Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)30U);
+      Lib_IntVector_Intrinsics_vec256 t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 30U);
       Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256 o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, 40U);
       Lib_IntVector_Intrinsics_vec256 o00 = o5;
       Lib_IntVector_Intrinsics_vec256 o11 = o10;
       Lib_IntVector_Intrinsics_vec256 o21 = o20;
@@ -94,12 +88,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
       e[2U] = o21;
       e[3U] = o31;
       e[4U] = o41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
       Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec256 *rn = pre0 + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec256 *rn5 = pre0 + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec256 *rn = pre0 + 10U;
+      Lib_IntVector_Intrinsics_vec256 *rn5 = pre0 + 15U;
       Lib_IntVector_Intrinsics_vec256 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec256 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec256 r2 = rn[2U];
@@ -204,37 +198,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
       Lib_IntVector_Intrinsics_vec256 t2 = a24;
       Lib_IntVector_Intrinsics_vec256 t3 = a34;
       Lib_IntVector_Intrinsics_vec256 t4 = a44;
-      Lib_IntVector_Intrinsics_vec256
-      mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec256
-      z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec256
-      z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
       Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec256
-      z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec256
-      z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec256 o01 = x02;
@@ -268,45 +253,41 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
       acc0[3U] = o3;
       acc0[4U] = o4;
     }
-    Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(acc0, pre0);
+    Hacl_MAC_Poly1305_Simd256_fmul_r4_normalize(acc0, pre0);
   }
-  uint32_t len1 = n * (uint32_t)16U - len0;
+  uint32_t len1 = n * 16U - len0;
   uint8_t *t10 = blocks + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem1 = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem1 = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t10 + i * (uint32_t)16U;
+    uint8_t *block = t10 + i * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -317,12 +298,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
     Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
     Lib_IntVector_Intrinsics_vec256 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec256 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r1[2U];
@@ -437,37 +418,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -481,41 +453,37 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     acc0[3U] = o3;
     acc0[4U] = o4;
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *last = t10 + nb * (uint32_t)16U;
+    uint8_t *last = t10 + nb * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem1 * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -526,12 +494,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem1 * 8U % 26U;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
-    Lib_IntVector_Intrinsics_vec256 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
-    e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec256 fi = e[rem1 * 8U / 26U];
+    e[rem1 * 8U / 26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
     Lib_IntVector_Intrinsics_vec256 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec256 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r1[2U];
@@ -646,37 +614,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -692,40 +651,36 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
   }
   uint8_t tmp[16U] = { 0U };
   memcpy(tmp, rem, r * sizeof (uint8_t));
-  if (r > (uint32_t)0U)
+  if (r > 0U)
   {
-    Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
     Lib_IntVector_Intrinsics_vec256 *acc = ctx;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -736,12 +691,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
     Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
     Lib_IntVector_Intrinsics_vec256 *r1 = pre;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec256 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r1[2U];
@@ -856,37 +811,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -915,49 +861,45 @@ poly1305_do_256(
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[25U] KRML_POST_ALIGN(32) = { 0U };
   uint8_t block[16U] = { 0U };
-  Hacl_Poly1305_256_poly1305_init(ctx, k);
-  if (aadlen != (uint32_t)0U)
+  Hacl_MAC_Poly1305_Simd256_poly1305_init(ctx, k);
+  if (aadlen != 0U)
   {
     poly1305_padded_256(ctx, aadlen, aad);
   }
-  if (mlen != (uint32_t)0U)
+  if (mlen != 0U)
   {
     poly1305_padded_256(ctx, mlen, m);
   }
   store64_le(block, (uint64_t)aadlen);
-  store64_le(block + (uint32_t)8U, (uint64_t)mlen);
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  store64_le(block + 8U, (uint64_t)mlen);
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
   uint64_t u0 = load64_le(block);
   uint64_t lo = u0;
-  uint64_t u = load64_le(block + (uint32_t)8U);
+  uint64_t u = load64_le(block + 8U);
   uint64_t hi = u;
   Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
   Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
   Lib_IntVector_Intrinsics_vec256
   f010 =
     Lib_IntVector_Intrinsics_vec256_and(f0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f110 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f20 =
-    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
       Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-          Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec256
   f30 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
   Lib_IntVector_Intrinsics_vec256 f01 = f010;
   Lib_IntVector_Intrinsics_vec256 f111 = f110;
   Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -968,12 +910,12 @@ poly1305_do_256(
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
   Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
   Lib_IntVector_Intrinsics_vec256 *r = pre;
-  Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
   Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1088,37 +1030,28 @@ poly1305_do_256(
   Lib_IntVector_Intrinsics_vec256 t2 = a26;
   Lib_IntVector_Intrinsics_vec256 t3 = a36;
   Lib_IntVector_Intrinsics_vec256 t4 = a46;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1131,95 +1064,93 @@ poly1305_do_256(
   acc[2U] = o2;
   acc[3U] = o3;
   acc[4U] = o4;
-  Hacl_Poly1305_256_poly1305_finish(out, k, ctx);
+  Hacl_MAC_Poly1305_Simd256_poly1305_finish(out, k, ctx);
 }
 
 /**
-Encrypt a message `m` with key `k`.
+Encrypt a message `input` with key `key`.
 
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
 
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the message.
-@param m Pointer to `mlen` bytes of memory where the message is read from.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is written to.
-@param mac Pointer to 16 bytes of memory where the mac is written to.
+@param output Pointer to `input_len` bytes of memory where the ciphertext is written to.
+@param tag Pointer to 16 bytes of memory where the mac is written to.
+@param input Pointer to `input_len` bytes of memory where the message is read from.
+@param input_len Length of the message.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
 */
 void
-Hacl_Chacha20Poly1305_256_aead_encrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(
+  uint8_t *output,
+  uint8_t *tag,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce
 )
 {
-  Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, cipher, m, k, n, (uint32_t)1U);
+  Hacl_Chacha20_Vec256_chacha20_encrypt_256(input_len, output, input, key, nonce, 1U);
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
-  uint8_t *key = tmp;
-  poly1305_do_256(key, aadlen, aad, mlen, cipher, mac);
+  Hacl_Chacha20_Vec256_chacha20_encrypt_256(64U, tmp, tmp, key, nonce, 0U);
+  uint8_t *key1 = tmp;
+  poly1305_do_256(key1, data_len, data, input_len, output, tag);
 }
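
/* A minimal usage sketch for the renamed SIMD-256 encrypt entry point above.
   Illustrative only: the header name and all buffer contents are assumptions;
   real callers must obtain `key` and `nonce` from a proper key exchange / RNG. */
#include <stdint.h>
#include <string.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"

void aead_encrypt_example(void)
{
  uint8_t key[32U] = { 0U };   /* 32-byte AEAD key (placeholder value) */
  uint8_t nonce[12U] = { 0U }; /* 12-byte nonce; must be unique per key */
  uint8_t data[4U] = { 1U, 2U, 3U, 4U }; /* associated data (illustrative) */
  uint8_t input[11U];
  uint8_t output[11U];         /* ciphertext; same length as the message */
  uint8_t tag[16U];            /* 16-byte authentication tag */
  memcpy(input, "hello world", 11U);
  /* New argument order: output, tag, input, input_len, data, data_len, key, nonce.
     `input` and `output` may point to the same memory for in-place encryption. */
  Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(output, tag, input, 11U, data, 4U, key, nonce);
}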
 
 /**
-Decrypt a ciphertext `cipher` with key `k`.
+Decrypt a ciphertext `input` with key `key`.
 
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
 
-If decryption succeeds, the resulting plaintext is stored in `m` and the function returns the success code 0.
-If decryption fails, the array `m` remains unchanged and the function returns the error code 1.
+If decryption succeeds, the resulting plaintext is stored in `output` and the function returns the success code 0.
+If decryption fails, the array `output` remains unchanged and the function returns the error code 1.
 
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the ciphertext.
-@param m Pointer to `mlen` bytes of memory where the message is written to.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is read from.
-@param mac Pointer to 16 bytes of memory where the mac is read from.
+@param output Pointer to `input_len` bytes of memory where the message is written to.
+@param input Pointer to `input_len` bytes of memory where the ciphertext is read from.
+@param input_len Length of the ciphertext.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
+@param tag Pointer to 16 bytes of memory where the mac is read from.
 
 @returns 0 on success; 1 on failure.
 */
 uint32_t
-Hacl_Chacha20Poly1305_256_aead_decrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce,
+  uint8_t *tag
 )
 {
-  uint8_t computed_mac[16U] = { 0U };
+  uint8_t computed_tag[16U] = { 0U };
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
-  uint8_t *key = tmp;
-  poly1305_do_256(key, aadlen, aad, mlen, cipher, computed_mac);
-  uint8_t res = (uint8_t)255U;
+  Hacl_Chacha20_Vec256_chacha20_encrypt_256(64U, tmp, tmp, key, nonce, 0U);
+  uint8_t *key1 = tmp;
+  poly1305_do_256(key1, data_len, data, input_len, input, computed_tag);
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
-    res = uu____0 & res;);
+    0U,
+    16U,
+    1U,
+    uint8_t uu____0 = FStar_UInt8_eq_mask(computed_tag[i], tag[i]);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
-    Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, m, cipher, k, n, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Chacha20_Vec256_chacha20_encrypt_256(input_len, output, input, key, nonce, 1U);
+    return 0U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
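
/* A matching sketch for the decrypt direction (same caveats as above: the header
   name, function name, and buffers are illustrative assumptions, not part of the
   generated code). The return value must be checked before using `output`. */
#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"

int aead_decrypt_example(uint8_t *output, uint8_t *input, uint32_t input_len,
  uint8_t *data, uint32_t data_len, uint8_t *key, uint8_t *nonce, uint8_t *tag)
{
  /* New argument order: output, input, input_len, data, data_len, key, nonce, tag. */
  uint32_t rc = Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(output, input, input_len,
    data, data_len, key, nonce, tag);
  if (rc == 0U)
  {
    return 0;  /* tag verified; plaintext is in `output` */
  }
  return -1;   /* authentication failed; `output` was left unchanged */
}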
 
diff --git a/src/Hacl_Bignum.c b/src/Hacl_Bignum.c
index fe73faa6..20ec5141 100644
--- a/src/Hacl_Bignum.c
+++ b/src/Hacl_Bignum.c
@@ -37,12 +37,12 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_mul_u32(aLen, a, aLen, b, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint32_t *a0 = a;
   uint32_t *a1 = a + len2;
   uint32_t *b0 = b;
@@ -52,23 +52,23 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t *tmp_ = tmp + aLen;
   uint32_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a0, a1, tmp_);
   uint32_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint32_t *os = t0;
-    uint32_t x = (((uint32_t)0U - c0) & t0[i]) | (~((uint32_t)0U - c0) & tmp_[i]);
+    uint32_t x = ((0U - c0) & t0[i]) | (~(0U - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c10);
+  KRML_MAYBE_UNUSED_VAR(c10);
   uint32_t c00 = c0;
   uint32_t c010 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, b0, b1, tmp_);
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, b1, b0, t1);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint32_t *os = t1;
-    uint32_t x = (((uint32_t)0U - c010) & t1[i]) | (~((uint32_t)0U - c010) & tmp_[i]);
+    uint32_t x = ((0U - c010) & t1[i]) | (~(0U - c010) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint32_t c11 = c010;
   uint32_t *t23 = tmp + aLen;
   uint32_t *tmp1 = tmp + aLen + aLen;
@@ -81,66 +81,61 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t *r231 = res + aLen;
   uint32_t *t01 = tmp;
   uint32_t *t231 = tmp + aLen;
-  uint32_t *t45 = tmp + (uint32_t)2U * aLen;
-  uint32_t *t67 = tmp + (uint32_t)3U * aLen;
+  uint32_t *t45 = tmp + 2U * aLen;
+  uint32_t *t67 = tmp + 3U * aLen;
   uint32_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r011, r231, t01);
   uint32_t c_sign = c00 ^ c11;
   uint32_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(aLen, t01, t231, t67);
   uint32_t c31 = c2 - c3;
   uint32_t c4 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, t01, t231, t45);
   uint32_t c41 = c2 + c4;
-  uint32_t mask = (uint32_t)0U - c_sign;
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  uint32_t mask = 0U - c_sign;
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     uint32_t *os = t45;
     uint32_t x = (mask & t45[i]) | (~mask & t67[i]);
     os[i] = x;
   }
   uint32_t c5 = (mask & c41) | (~mask & c31);
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint32_t *r0 = res + aLen2;
   uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0);
   uint32_t c6 = r10;
   uint32_t c60 = c6;
   uint32_t c7 = c5 + c60;
   uint32_t *r = res + aLen + aLen2;
-  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32((uint32_t)0U, r[0U], c7, r);
+  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32(0U, r[0U], c7, r);
   uint32_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint32_t *a11 = r + (uint32_t)1U;
-    uint32_t *res1 = r + (uint32_t)1U;
+    uint32_t *a11 = r + 1U;
+    uint32_t *res1 = r + 1U;
     uint32_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint32_t t11 = a11[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i0);
-      uint32_t t110 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t110, (uint32_t)0U, res_i1);
-      uint32_t t111 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t111, (uint32_t)0U, res_i2);
-      uint32_t t112 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t112, (uint32_t)0U, res_i);
+      uint32_t t11 = a11[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i0);
+      uint32_t t110 = a11[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t110, 0U, res_i1);
+      uint32_t t111 = a11[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t111, 0U, res_i2);
+      uint32_t t112 = a11[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t112, 0U, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint32_t t11 = a11[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i);
     }
     uint32_t c110 = c;
     r1 = c110;
@@ -152,7 +147,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t c8 = r1;
   uint32_t c = c8;
   uint32_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -164,12 +159,12 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_mul_u64(aLen, a, aLen, b, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint64_t *a0 = a;
   uint64_t *a1 = a + len2;
   uint64_t *b0 = b;
@@ -179,23 +174,23 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t *tmp_ = tmp + aLen;
   uint64_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a0, a1, tmp_);
   uint64_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint64_t *os = t0;
-    uint64_t x = (((uint64_t)0U - c0) & t0[i]) | (~((uint64_t)0U - c0) & tmp_[i]);
+    uint64_t x = ((0ULL - c0) & t0[i]) | (~(0ULL - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c10);
+  KRML_MAYBE_UNUSED_VAR(c10);
   uint64_t c00 = c0;
   uint64_t c010 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, b0, b1, tmp_);
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, b1, b0, t1);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint64_t *os = t1;
-    uint64_t x = (((uint64_t)0U - c010) & t1[i]) | (~((uint64_t)0U - c010) & tmp_[i]);
+    uint64_t x = ((0ULL - c010) & t1[i]) | (~(0ULL - c010) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint64_t c11 = c010;
   uint64_t *t23 = tmp + aLen;
   uint64_t *tmp1 = tmp + aLen + aLen;
@@ -208,66 +203,61 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t *r231 = res + aLen;
   uint64_t *t01 = tmp;
   uint64_t *t231 = tmp + aLen;
-  uint64_t *t45 = tmp + (uint32_t)2U * aLen;
-  uint64_t *t67 = tmp + (uint32_t)3U * aLen;
+  uint64_t *t45 = tmp + 2U * aLen;
+  uint64_t *t67 = tmp + 3U * aLen;
   uint64_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r011, r231, t01);
   uint64_t c_sign = c00 ^ c11;
   uint64_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(aLen, t01, t231, t67);
   uint64_t c31 = c2 - c3;
   uint64_t c4 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, t01, t231, t45);
   uint64_t c41 = c2 + c4;
-  uint64_t mask = (uint64_t)0U - c_sign;
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  uint64_t mask = 0ULL - c_sign;
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     uint64_t *os = t45;
     uint64_t x = (mask & t45[i]) | (~mask & t67[i]);
     os[i] = x;
   }
   uint64_t c5 = (mask & c41) | (~mask & c31);
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint64_t *r0 = res + aLen2;
   uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0);
   uint64_t c6 = r10;
   uint64_t c60 = c6;
   uint64_t c7 = c5 + c60;
   uint64_t *r = res + aLen + aLen2;
-  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, r[0U], c7, r);
+  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, r[0U], c7, r);
   uint64_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint64_t *a11 = r + (uint32_t)1U;
-    uint64_t *res1 = r + (uint32_t)1U;
+    uint64_t *a11 = r + 1U;
+    uint64_t *res1 = r + 1U;
     uint64_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint64_t t11 = a11[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i0);
-      uint64_t t110 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t110, (uint64_t)0U, res_i1);
-      uint64_t t111 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t111, (uint64_t)0U, res_i2);
-      uint64_t t112 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t112, (uint64_t)0U, res_i);
+      uint64_t t11 = a11[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i0);
+      uint64_t t110 = a11[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t110, 0ULL, res_i1);
+      uint64_t t111 = a11[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t111, 0ULL, res_i2);
+      uint64_t t112 = a11[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t112, 0ULL, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint64_t t11 = a11[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i);
     }
     uint64_t c110 = c;
     r1 = c110;
@@ -279,7 +269,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t c8 = r1;
   uint64_t c = c8;
   uint64_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -290,27 +280,27 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(
   uint32_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_sqr_u32(aLen, a, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint32_t *a0 = a;
   uint32_t *a1 = a + len2;
   uint32_t *t0 = tmp;
   uint32_t *tmp_ = tmp + aLen;
   uint32_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a0, a1, tmp_);
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint32_t *os = t0;
-    uint32_t x = (((uint32_t)0U - c0) & t0[i]) | (~((uint32_t)0U - c0) & tmp_[i]);
+    uint32_t x = ((0U - c0) & t0[i]) | (~(0U - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint32_t c00 = c0;
-  KRML_HOST_IGNORE(c00);
+  KRML_MAYBE_UNUSED_VAR(c00);
   uint32_t *t23 = tmp + aLen;
   uint32_t *tmp1 = tmp + aLen + aLen;
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len2, t0, tmp1, t23);
@@ -322,54 +312,49 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(
   uint32_t *r231 = res + aLen;
   uint32_t *t01 = tmp;
   uint32_t *t231 = tmp + aLen;
-  uint32_t *t45 = tmp + (uint32_t)2U * aLen;
+  uint32_t *t45 = tmp + 2U * aLen;
   uint32_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r011, r231, t01);
   uint32_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(aLen, t01, t231, t45);
   uint32_t c5 = c2 - c3;
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint32_t *r0 = res + aLen2;
   uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0);
   uint32_t c4 = r10;
   uint32_t c6 = c4;
   uint32_t c7 = c5 + c6;
   uint32_t *r = res + aLen + aLen2;
-  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32((uint32_t)0U, r[0U], c7, r);
+  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32(0U, r[0U], c7, r);
   uint32_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint32_t *a11 = r + (uint32_t)1U;
-    uint32_t *res1 = r + (uint32_t)1U;
+    uint32_t *a11 = r + 1U;
+    uint32_t *res1 = r + 1U;
     uint32_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint32_t t1 = a11[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a11[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a11[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a11[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a11[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, 0U, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint32_t t1 = a11[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, 0U, res_i);
     }
     uint32_t c10 = c;
     r1 = c10;
@@ -381,7 +366,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(
   uint32_t c8 = r1;
   uint32_t c = c8;
   uint32_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -392,27 +377,27 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(
   uint64_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_sqr_u64(aLen, a, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint64_t *a0 = a;
   uint64_t *a1 = a + len2;
   uint64_t *t0 = tmp;
   uint64_t *tmp_ = tmp + aLen;
   uint64_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a0, a1, tmp_);
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint64_t *os = t0;
-    uint64_t x = (((uint64_t)0U - c0) & t0[i]) | (~((uint64_t)0U - c0) & tmp_[i]);
+    uint64_t x = ((0ULL - c0) & t0[i]) | (~(0ULL - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint64_t c00 = c0;
-  KRML_HOST_IGNORE(c00);
+  KRML_MAYBE_UNUSED_VAR(c00);
   uint64_t *t23 = tmp + aLen;
   uint64_t *tmp1 = tmp + aLen + aLen;
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len2, t0, tmp1, t23);
@@ -424,54 +409,49 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(
   uint64_t *r231 = res + aLen;
   uint64_t *t01 = tmp;
   uint64_t *t231 = tmp + aLen;
-  uint64_t *t45 = tmp + (uint32_t)2U * aLen;
+  uint64_t *t45 = tmp + 2U * aLen;
   uint64_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r011, r231, t01);
   uint64_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(aLen, t01, t231, t45);
   uint64_t c5 = c2 - c3;
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint64_t *r0 = res + aLen2;
   uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0);
   uint64_t c4 = r10;
   uint64_t c6 = c4;
   uint64_t c7 = c5 + c6;
   uint64_t *r = res + aLen + aLen2;
-  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, r[0U], c7, r);
+  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, r[0U], c7, r);
   uint64_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint64_t *a11 = r + (uint32_t)1U;
-    uint64_t *res1 = r + (uint32_t)1U;
+    uint64_t *a11 = r + 1U;
+    uint64_t *res1 = r + 1U;
     uint64_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint64_t t1 = a11[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a11[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a11[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a11[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a11[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, 0ULL, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint64_t t1 = a11[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c10 = c;
     r1 = c10;
@@ -483,7 +463,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(
   uint64_t c8 = r1;
   uint64_t c = c8;
   uint64_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -495,27 +475,27 @@ Hacl_Bignum_bn_add_mod_n_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -526,27 +506,27 @@ Hacl_Bignum_bn_add_mod_n_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t tmp[len1];
   memset(tmp, 0U, len1 * sizeof (uint32_t));
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = res[i];
     uint32_t t2 = n[i];
@@ -555,7 +535,7 @@ Hacl_Bignum_bn_add_mod_n_u32(
   }
   uint32_t c1 = c;
   uint32_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -572,27 +552,27 @@ Hacl_Bignum_bn_add_mod_n_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -603,27 +583,27 @@ Hacl_Bignum_bn_add_mod_n_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t tmp[len1];
   memset(tmp, 0U, len1 * sizeof (uint64_t));
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = res[i];
     uint64_t t2 = n[i];
@@ -632,7 +612,7 @@ Hacl_Bignum_bn_add_mod_n_u64(
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -649,27 +629,27 @@ Hacl_Bignum_bn_sub_mod_n_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -680,27 +660,27 @@ Hacl_Bignum_bn_sub_mod_n_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t tmp[len1];
   memset(tmp, 0U, len1 * sizeof (uint32_t));
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = res[i];
     uint32_t t2 = n[i];
@@ -708,9 +688,9 @@ Hacl_Bignum_bn_sub_mod_n_u32(
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t2, res_i);
   }
   uint32_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint32_t c2 = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t c2 = 0U - c00;
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -727,27 +707,27 @@ Hacl_Bignum_bn_sub_mod_n_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -758,27 +738,27 @@ Hacl_Bignum_bn_sub_mod_n_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t tmp[len1];
   memset(tmp, 0U, len1 * sizeof (uint64_t));
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = res[i];
     uint64_t t2 = n[i];
@@ -786,9 +766,9 @@ Hacl_Bignum_bn_sub_mod_n_u64(
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -798,42 +778,42 @@ Hacl_Bignum_bn_sub_mod_n_u64(
 
 uint32_t Hacl_Bignum_ModInvLimb_mod_inv_uint32(uint32_t n0)
 {
-  uint32_t alpha = (uint32_t)2147483648U;
+  uint32_t alpha = 2147483648U;
   uint32_t beta = n0;
-  uint32_t ub = (uint32_t)0U;
-  uint32_t vb = (uint32_t)0U;
-  ub = (uint32_t)1U;
-  vb = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t ub = 0U;
+  uint32_t vb = 0U;
+  ub = 1U;
+  vb = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint32_t us = ub;
     uint32_t vs = vb;
-    uint32_t u_is_odd = (uint32_t)0U - (us & (uint32_t)1U);
+    uint32_t u_is_odd = 0U - (us & 1U);
     uint32_t beta_if_u_is_odd = beta & u_is_odd;
-    ub = ((us ^ beta_if_u_is_odd) >> (uint32_t)1U) + (us & beta_if_u_is_odd);
+    ub = ((us ^ beta_if_u_is_odd) >> 1U) + (us & beta_if_u_is_odd);
     uint32_t alpha_if_u_is_odd = alpha & u_is_odd;
-    vb = (vs >> (uint32_t)1U) + alpha_if_u_is_odd;
+    vb = (vs >> 1U) + alpha_if_u_is_odd;
   }
   return vb;
 }
 
 uint64_t Hacl_Bignum_ModInvLimb_mod_inv_uint64(uint64_t n0)
 {
-  uint64_t alpha = (uint64_t)9223372036854775808U;
+  uint64_t alpha = 9223372036854775808ULL;
   uint64_t beta = n0;
-  uint64_t ub = (uint64_t)0U;
-  uint64_t vb = (uint64_t)0U;
-  ub = (uint64_t)1U;
-  vb = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t ub = 0ULL;
+  uint64_t vb = 0ULL;
+  ub = 1ULL;
+  vb = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t us = ub;
     uint64_t vs = vb;
-    uint64_t u_is_odd = (uint64_t)0U - (us & (uint64_t)1U);
+    uint64_t u_is_odd = 0ULL - (us & 1ULL);
     uint64_t beta_if_u_is_odd = beta & u_is_odd;
-    ub = ((us ^ beta_if_u_is_odd) >> (uint32_t)1U) + (us & beta_if_u_is_odd);
+    ub = ((us ^ beta_if_u_is_odd) >> 1U) + (us & beta_if_u_is_odd);
     uint64_t alpha_if_u_is_odd = alpha & u_is_odd;
-    vb = (vs >> (uint32_t)1U) + alpha_if_u_is_odd;
+    vb = (vs >> 1U) + alpha_if_u_is_odd;
   }
   return vb;
 }
@@ -844,15 +824,15 @@ uint32_t Hacl_Bignum_Montgomery_bn_check_modulus_u32(uint32_t len, uint32_t *n)
   uint32_t one[len];
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc;
   return m0 & m1;
@@ -867,10 +847,10 @@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(
 )
 {
   memset(res, 0U, len * sizeof (uint32_t));
-  uint32_t i = nBits / (uint32_t)32U;
-  uint32_t j = nBits % (uint32_t)32U;
-  res[i] = res[i] | (uint32_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U * len - nBits; i0++)
+  uint32_t i = nBits / 32U;
+  uint32_t j = nBits % 32U;
+  res[i] = res[i] | 1U << j;
+  for (uint32_t i0 = 0U; i0 < 64U * len - nBits; i0++)
   {
     Hacl_Bignum_bn_add_mod_n_u32(len, n, res, res, res);
   }
@@ -885,28 +865,28 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint32_t a_i = n[i];
       uint32_t *res_i = res_j0 + i;
@@ -923,27 +903,27 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint32_t));
-  uint32_t c1 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+  uint32_t c1 = 0U;
+  for (uint32_t i = 0U; i < len / 4U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t12, t2, res_i);
   }
-  for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+  for (uint32_t i = len / 4U * 4U; i < len; i++)
   {
     uint32_t t1 = res[i];
     uint32_t t2 = n[i];
@@ -952,7 +932,7 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
   }
   uint32_t c10 = c1;
   uint32_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -973,9 +953,9 @@ Hacl_Bignum_Montgomery_bn_to_mont_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv, c, aM);
 }
@@ -1009,9 +989,9 @@ Hacl_Bignum_Montgomery_bn_mont_mul_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, aM, bM, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
@@ -1028,9 +1008,9 @@ Hacl_Bignum_Montgomery_bn_mont_sqr_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, aM, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
@@ -1041,15 +1021,15 @@ uint64_t Hacl_Bignum_Montgomery_bn_check_modulus_u64(uint32_t len, uint64_t *n)
   uint64_t one[len];
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   return m0 & m1;
@@ -1064,10 +1044,10 @@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(
 )
 {
   memset(res, 0U, len * sizeof (uint64_t));
-  uint32_t i = nBits / (uint32_t)64U;
-  uint32_t j = nBits % (uint32_t)64U;
-  res[i] = res[i] | (uint64_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U * len - nBits; i0++)
+  uint32_t i = nBits / 64U;
+  uint32_t j = nBits % 64U;
+  res[i] = res[i] | 1ULL << j;
+  for (uint32_t i0 = 0U; i0 < 128U * len - nBits; i0++)
   {
     Hacl_Bignum_bn_add_mod_n_u64(len, n, res, res, res);
   }
@@ -1082,28 +1062,28 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c1 = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = n[i];
       uint64_t *res_i = res_j0 + i;
@@ -1120,27 +1100,27 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint64_t));
-  uint64_t c1 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+  uint64_t c1 = 0ULL;
+  for (uint32_t i = 0U; i < len / 4U; i++)
   {
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t12, t2, res_i);
   }
-  for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+  for (uint32_t i = len / 4U * 4U; i < len; i++)
   {
     uint64_t t1 = res[i];
     uint64_t t2 = n[i];
@@ -1149,7 +1129,7 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
   }
   uint64_t c10 = c1;
   uint64_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -1170,9 +1150,9 @@ Hacl_Bignum_Montgomery_bn_to_mont_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv, c, aM);
 }
@@ -1206,9 +1186,9 @@ Hacl_Bignum_Montgomery_bn_mont_mul_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, aM, bM, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
@@ -1225,9 +1205,9 @@ Hacl_Bignum_Montgomery_bn_mont_sqr_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, aM, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
@@ -1241,28 +1221,28 @@ bn_almost_mont_reduction_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint32_t a_i = n[i];
       uint32_t *res_i = res_j0 + i;
@@ -1280,9 +1260,9 @@ bn_almost_mont_reduction_u32(
   uint32_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint32_t));
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len, res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t *os = res;
     uint32_t x = (m & tmp[i]) | (~m & res[i]);
@@ -1303,9 +1283,9 @@ bn_almost_mont_mul_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, aM, bM, tmp, c);
   bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
@@ -1322,9 +1302,9 @@ bn_almost_mont_sqr_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, aM, tmp, c);
   bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
@@ -1338,28 +1318,28 @@ bn_almost_mont_reduction_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c1 = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = n[i];
       uint64_t *res_i = res_j0 + i;
@@ -1377,9 +1357,9 @@ bn_almost_mont_reduction_u64(
   uint64_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint64_t));
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len, res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t *os = res;
     uint64_t x = (m & tmp[i]) | (~m & res[i]);
@@ -1400,9 +1380,9 @@ bn_almost_mont_mul_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, aM, bM, tmp, c);
   bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
@@ -1419,9 +1399,9 @@ bn_almost_mont_sqr_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, aM, tmp, c);
   bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
@@ -1439,56 +1419,56 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(
   uint32_t one[len];
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m10 = acc0;
   uint32_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t m1;
-  if (bBits < (uint32_t)32U * bLen)
+  if (bBits < 32U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), bLen);
     uint32_t b2[bLen];
     memset(b2, 0U, bLen * sizeof (uint32_t));
-    uint32_t i0 = bBits / (uint32_t)32U;
-    uint32_t j = bBits % (uint32_t)32U;
-    b2[i0] = b2[i0] | (uint32_t)1U << j;
-    uint32_t acc = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 32U;
+    uint32_t j = bBits % 32U;
+    b2[i0] = b2[i0] | 1U << j;
+    uint32_t acc = 0U;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]);
       uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
     }
     uint32_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint32_t)0xFFFFFFFFU;
+    m1 = 0xFFFFFFFFU;
   }
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t m = m1 & m2;
@@ -1507,7 +1487,7 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t aM[len];
@@ -1515,9 +1495,9 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
     KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
     uint32_t c[len + len];
     memset(c, 0U, (len + len) * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-    uint32_t tmp0[(uint32_t)4U * len];
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+    KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+    uint32_t tmp0[4U * len];
+    memset(tmp0, 0U, 4U * len * sizeof (uint32_t));
     Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c);
     Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM);
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
@@ -1531,13 +1511,13 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         bn_almost_mont_mul_u32(len, ctx_n0, mu, resM, aM, resM);
@@ -1558,31 +1538,31 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp0[(uint32_t)4U * len];
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp0[4U * len];
+  memset(tmp0, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM);
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t resM[len];
   memset(resM, 0U, len * sizeof (uint32_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t ctx[len + len];
   memset(ctx, 0U, (len + len) * sizeof (uint32_t));
   memcpy(ctx, n, len * sizeof (uint32_t));
   memcpy(ctx + len, r2, len * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len);
-  uint32_t table[(uint32_t)16U * len];
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len);
+  uint32_t table[16U * len];
+  memset(table, 0U, 16U * len * sizeof (uint32_t));
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint32_t));
@@ -1593,21 +1573,21 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
   Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * len;
     uint32_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * len;
     uint32_t *ctx_n = ctx;
     bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
     uint32_t bits_l32 = bits_c;
     const uint32_t *a_bits_l = table + bits_l32 * len;
     memcpy(resM, (uint32_t *)a_bits_l, len * sizeof (uint32_t));
@@ -1621,16 +1601,16 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t tmp1[len];
   memset(tmp1, 0U, len * sizeof (uint32_t));
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
     uint32_t bits_l32 = bits_l;
     const uint32_t *a_bits_l = table + bits_l32 * len;
     memcpy(tmp1, (uint32_t *)a_bits_l, len * sizeof (uint32_t));
@@ -1656,7 +1636,7 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t aM[len];
@@ -1664,9 +1644,9 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
     KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
     uint32_t c[len + len];
     memset(c, 0U, (len + len) * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-    uint32_t tmp0[(uint32_t)4U * len];
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+    KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+    uint32_t tmp0[4U * len];
+    memset(tmp0, 0U, 4U * len * sizeof (uint32_t));
     Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c);
     Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM);
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
@@ -1677,20 +1657,20 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
     memset(ctx, 0U, (len + len) * sizeof (uint32_t));
     memcpy(ctx, n, len * sizeof (uint32_t));
     memcpy(ctx + len, r2, len * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      for (uint32_t i = 0U; i < len; i++)
       {
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -1701,9 +1681,9 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
       sw = bit;
     }
     uint32_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len; i++)
+    for (uint32_t i = 0U; i < len; i++)
     {
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
@@ -1720,31 +1700,31 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t c0[len + len];
   memset(c0, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp0[(uint32_t)4U * len];
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp0[4U * len];
+  memset(tmp0, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c0);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c0, aM);
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t resM[len];
   memset(resM, 0U, len * sizeof (uint32_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t ctx[len + len];
   memset(ctx, 0U, (len + len) * sizeof (uint32_t));
   memcpy(ctx, n, len * sizeof (uint32_t));
   memcpy(ctx + len, r2, len * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len);
-  uint32_t table[(uint32_t)16U * len];
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len);
+  uint32_t table[16U * len];
+  memset(table, 0U, 16U * len * sizeof (uint32_t));
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint32_t));
@@ -1755,29 +1735,29 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
   Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * len;
     uint32_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * len;
     uint32_t *ctx_n = ctx;
     bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint32_t *)(table + (uint32_t)0U * len), len * sizeof (uint32_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+    memcpy(resM, (uint32_t *)(table + 0U * len), len * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
         uint32_t *os = resM;
         uint32_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -1793,24 +1773,24 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t tmp1[len];
   memset(tmp1, 0U, len * sizeof (uint32_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp1, (uint32_t *)(table + (uint32_t)0U * len), len * sizeof (uint32_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
+    memcpy(tmp1, (uint32_t *)(table + 0U * len), len * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
         uint32_t *os = tmp1;
         uint32_t x = (c & res_j[i]) | (~c & tmp1[i]);
@@ -1877,56 +1857,56 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(
   uint64_t one[len];
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m10 = acc0;
   uint64_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t m1;
-  if (bBits < (uint32_t)64U * bLen)
+  if (bBits < 64U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
     uint64_t b2[bLen];
     memset(b2, 0U, bLen * sizeof (uint64_t));
-    uint32_t i0 = bBits / (uint32_t)64U;
-    uint32_t j = bBits % (uint32_t)64U;
-    b2[i0] = b2[i0] | (uint64_t)1U << j;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 64U;
+    uint32_t j = bBits % 64U;
+    b2[i0] = b2[i0] | 1ULL << j;
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    m1 = 0xFFFFFFFFFFFFFFFFULL;
   }
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t m = m1 & m2;
@@ -1945,7 +1925,7 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t aM[len];
@@ -1953,9 +1933,9 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
     KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
     uint64_t c[len + len];
     memset(c, 0U, (len + len) * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-    uint64_t tmp0[(uint32_t)4U * len];
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+    KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+    uint64_t tmp0[4U * len];
+    memset(tmp0, 0U, 4U * len * sizeof (uint64_t));
     Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c);
     Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM);
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
@@ -1969,13 +1949,13 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         bn_almost_mont_mul_u64(len, ctx_n0, mu, resM, aM, resM);
@@ -1996,31 +1976,31 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp0[(uint32_t)4U * len];
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp0[4U * len];
+  memset(tmp0, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM);
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t resM[len];
   memset(resM, 0U, len * sizeof (uint64_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t ctx[len + len];
   memset(ctx, 0U, (len + len) * sizeof (uint64_t));
   memcpy(ctx, n, len * sizeof (uint64_t));
   memcpy(ctx + len, r2, len * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len);
-  uint64_t table[(uint32_t)16U * len];
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len);
+  uint64_t table[16U * len];
+  memset(table, 0U, 16U * len * sizeof (uint64_t));
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint64_t));
@@ -2031,21 +2011,21 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
   Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * len;
     uint64_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * len;
     uint64_t *ctx_n = ctx;
     bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
     uint32_t bits_l32 = (uint32_t)bits_c;
     const uint64_t *a_bits_l = table + bits_l32 * len;
     memcpy(resM, (uint64_t *)a_bits_l, len * sizeof (uint64_t));
@@ -2059,16 +2039,16 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t tmp1[len];
   memset(tmp1, 0U, len * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       bn_almost_mont_sqr_u64(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
     uint32_t bits_l32 = (uint32_t)bits_l;
     const uint64_t *a_bits_l = table + bits_l32 * len;
     memcpy(tmp1, (uint64_t *)a_bits_l, len * sizeof (uint64_t));
@@ -2094,7 +2074,7 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t aM[len];
@@ -2102,9 +2082,9 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
     KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
     uint64_t c[len + len];
     memset(c, 0U, (len + len) * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-    uint64_t tmp0[(uint32_t)4U * len];
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+    KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+    uint64_t tmp0[4U * len];
+    memset(tmp0, 0U, 4U * len * sizeof (uint64_t));
     Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c);
     Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM);
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
@@ -2115,20 +2095,20 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
     memset(ctx, 0U, (len + len) * sizeof (uint64_t));
     memcpy(ctx, n, len * sizeof (uint64_t));
     memcpy(ctx + len, r2, len * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      for (uint32_t i = 0U; i < len; i++)
       {
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -2139,9 +2119,9 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
       sw = bit;
     }
     uint64_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len; i++)
+    for (uint32_t i = 0U; i < len; i++)
     {
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
@@ -2158,31 +2138,31 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t c0[len + len];
   memset(c0, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp0[(uint32_t)4U * len];
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp0[4U * len];
+  memset(tmp0, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c0);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c0, aM);
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t resM[len];
   memset(resM, 0U, len * sizeof (uint64_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t ctx[len + len];
   memset(ctx, 0U, (len + len) * sizeof (uint64_t));
   memcpy(ctx, n, len * sizeof (uint64_t));
   memcpy(ctx + len, r2, len * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len);
-  uint64_t table[(uint32_t)16U * len];
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len);
+  uint64_t table[16U * len];
+  memset(table, 0U, 16U * len * sizeof (uint64_t));
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t tmp[len];
   memset(tmp, 0U, len * sizeof (uint64_t));
@@ -2193,29 +2173,29 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
   Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * len;
     uint64_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * len;
     uint64_t *ctx_n = ctx;
     bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint64_t *)(table + (uint32_t)0U * len), len * sizeof (uint64_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+    memcpy(resM, (uint64_t *)(table + 0U * len), len * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
         uint64_t *os = resM;
         uint64_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -2231,24 +2211,24 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t tmp1[len];
   memset(tmp1, 0U, len * sizeof (uint64_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       bn_almost_mont_sqr_u64(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp1, (uint64_t *)(table + (uint32_t)0U * len), len * sizeof (uint64_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
+    memcpy(tmp1, (uint64_t *)(table + 0U * len), len * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
         uint64_t *os = tmp1;
         uint64_t x = (c & res_j[i]) | (~c & tmp1[i]);
diff --git a/src/Hacl_Bignum256.c b/src/Hacl_Bignum256.c
index 41aaadeb..a133dd17 100644
--- a/src/Hacl_Bignum256.c
+++ b/src/Hacl_Bignum256.c
@@ -60,23 +60,23 @@ Write `a + b mod 2^256` in `res`.
 */
 uint64_t Hacl_Bignum256_add(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   return c;
@@ -91,23 +91,23 @@ Write `a - b mod 2^256` in `res`.
 */
 uint64_t Hacl_Bignum256_sub(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   return c;
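
For orientation, a minimal caller sketch for the two primitives above, assuming the usual `Hacl_Bignum256.h` header and the 4-limb little-endian layout used throughout this file; the returned word is the outgoing carry, respectively borrow (0 or 1):

    #include <stdint.h>
    #include "Hacl_Bignum256.h"   /* assumed public header for these functions */

    static void add_sub_demo(void)
    {
      /* 256-bit values as 4 little-endian 64-bit limbs */
      uint64_t a[4U] = { 0xFFFFFFFFFFFFFFFFULL, 0ULL, 0ULL, 0ULL };
      uint64_t b[4U] = { 1ULL, 0ULL, 0ULL, 0ULL };
      uint64_t sum[4U] = { 0U };
      uint64_t diff[4U] = { 0U };
      uint64_t carry = Hacl_Bignum256_add(a, b, sum);    /* 0: a + b still fits in 2^256 */
      uint64_t borrow = Hacl_Bignum256_sub(b, a, diff);  /* 1: b < a, result wrapped mod 2^256 */
      (void)carry; (void)borrow;
    }
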
@@ -125,52 +125,52 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum256_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
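
The `c2 = c00 - c1` step above folds the carry from `a + b` and the borrow from the trial subtraction of `n` into an all-zeros or all-ones mask, which then drives a branchless select between the raw sum and the reduced value. The same idiom, isolated in a hypothetical helper for clarity:

    #include <stdint.h>

    /* Returns x when mask is all ones, y when mask is zero; no secret-dependent branch. */
    static inline uint64_t ct_select_u64(uint64_t mask, uint64_t x, uint64_t y)
    {
      return (mask & x) | (~mask & y);
    }

    /* mask = 0ULL - 1ULL is 0xFFFFFFFFFFFFFFFF and selects x;
       mask = 0ULL - 0ULL is 0 and selects y. */
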
@@ -188,53 +188,53 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum256_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x;);
@@ -248,30 +248,30 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum256_mul(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = b[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = a[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = a[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
     uint64_t r = c;
-    res[(uint32_t)4U + i0] = r;);
+    res[4U + i0] = r;);
 }
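
A caller sketch for the schoolbook multiplication above; the initial `memset(res, 0U, 8U * sizeof (uint64_t))` shows that the destination holds the full 512-bit product, i.e. 8 limbs rather than 4:

    uint64_t a[4U] = { 2ULL, 0ULL, 0ULL, 0ULL };
    uint64_t b[4U] = { 3ULL, 0ULL, 0ULL, 0ULL };
    uint64_t wide[8U] = { 0U };        /* 512-bit result buffer */
    Hacl_Bignum256_mul(a, b, wide);    /* wide[0] == 6, upper limbs stay 0 */
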
 
 /**
@@ -282,31 +282,31 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum256_sqr(uint64_t *a, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = a;
     uint64_t a_j = a[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -314,29 +314,29 @@ void Hacl_Bignum256_sqr(uint64_t *a, uint64_t *res)
     }
     uint64_t r = c;
     res[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
-  uint32_t i = nBits / (uint32_t)64U;
-  uint32_t j = nBits % (uint32_t)64U;
-  res[i] = res[i] | (uint64_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)512U - nBits; i0++)
+  memset(res, 0U, 4U * sizeof (uint64_t));
+  uint32_t i = nBits / 64U;
+  uint32_t j = nBits % 64U;
+  res[i] = res[i] | 1ULL << j;
+  for (uint32_t i0 = 0U; i0 < 512U - nBits; i0++)
   {
     Hacl_Bignum256_add_mod(n, res, res, res);
   }
@@ -344,61 +344,61 @@ static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 
 static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)4U + i0;
-    uint64_t res_j = c[(uint32_t)4U + i0];
+    uint64_t *resb = c + 4U + i0;
+    uint64_t res_j = c[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, c + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c1 = (uint64_t)0U;
+  uint64_t c1 = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t12, t2, res_i);
   }
   uint64_t c10 = c1;
   uint64_t c2 = c00 - c10;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -407,49 +407,49 @@ static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *
 static inline void from(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *a)
 {
   uint64_t tmp[8U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, aM, 4U * sizeof (uint64_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)4U + i0;
-    uint64_t res_j = c[(uint32_t)4U + i0];
+    uint64_t *resb = c + 4U + i0;
+    uint64_t res_j = c[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, c + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
   uint64_t c1 = Hacl_Bignum256_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (m & tmp[i]) | (~m & res[i]);
     os[i] = x;);
@@ -459,61 +459,61 @@ static inline void
 amont_mul(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *bM, uint64_t *resM)
 {
   uint64_t c[8U] = { 0U };
-  memset(c, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(c, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = bM[i0];
     uint64_t *res_j = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     {
-      uint64_t a_i = aM[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = aM[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c1, res_i0);
-      uint64_t a_i0 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = aM[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c1, res_i1);
-      uint64_t a_i1 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = aM[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c1, res_i2);
-      uint64_t a_i2 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = aM[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c1, res_i);
     }
     uint64_t r = c1;
-    c[(uint32_t)4U + i0] = r;);
+    c[4U + i0] = r;);
   areduction(n, nInv_u64, c, resM);
 }
 
 static inline void amont_sqr(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *resM)
 {
   uint64_t c[8U] = { 0U };
-  memset(c, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(c, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = aM;
     uint64_t a_j = aM[i0];
     uint64_t *res_j = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c1 = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c1, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c1, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c1, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c1, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -521,20 +521,20 @@ static inline void amont_sqr(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint6
     }
     uint64_t r = c1;
     c[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, c, c, c);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, c, c, c);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res = FStar_UInt128_mul_wide(aM[i], aM[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, c, tmp, c);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, c, tmp, c);
+  KRML_MAYBE_UNUSED_VAR(c1);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -543,44 +543,44 @@ bn_slow_precomp(uint64_t *n, uint64_t mu, uint64_t *r2, uint64_t *a, uint64_t *r
 {
   uint64_t a_mod[4U] = { 0U };
   uint64_t a1[8U] = { 0U };
-  memcpy(a1, a, (uint32_t)8U * sizeof (uint64_t));
-  uint64_t c0 = (uint64_t)0U;
+  memcpy(a1, a, 8U * sizeof (uint64_t));
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t qj = mu * a1[i0];
     uint64_t *res_j0 = a1 + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
     }
     uint64_t r = c;
     uint64_t c1 = r;
-    uint64_t *resb = a1 + (uint32_t)4U + i0;
-    uint64_t res_j = a1[(uint32_t)4U + i0];
+    uint64_t *resb = a1 + 4U + i0;
+    uint64_t res_j = a1[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););
-  memcpy(a_mod, a1 + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(a_mod, a1 + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
   uint64_t c1 = Hacl_Bignum256_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = a_mod;
     uint64_t x = (m & tmp[i]) | (~m & a_mod[i]);
     os[i] = x;);
@@ -603,23 +603,22 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum256_mod(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[4U] = { 0U };
-  memset(one, 0U, (uint32_t)4U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
+  memset(one, 0U, 4U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m1 = acc;
   uint64_t is_valid_m = m0 & m1;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t r2[4U] = { 0U };
     precompr2(nBits, n, r2);
@@ -628,68 +627,68 @@ bool Hacl_Bignum256_mod(uint64_t *n, uint64_t *a, uint64_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b)
 {
   uint64_t one[4U] = { 0U };
-  memset(one, 0U, (uint32_t)4U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
+  memset(one, 0U, 4U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m10 = acc0;
   uint64_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t m1;
-  if (bBits < (uint32_t)64U * bLen)
+  if (bBits < 64U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
     uint64_t b2[bLen];
     memset(b2, 0U, bLen * sizeof (uint64_t));
-    uint32_t i0 = bBits / (uint32_t)64U;
-    uint32_t j = bBits % (uint32_t)64U;
-    b2[i0] = b2[i0] | (uint64_t)1U << j;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 64U;
+    uint32_t j = bBits % 64U;
+    b2[i0] = b2[i0] | 1ULL << j;
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    m1 = 0xFFFFFFFFFFFFFFFFULL;
   }
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m2 = acc;
   uint64_t m = m1 & m2;
   return m00 & m;
@@ -706,7 +705,7 @@ exp_vartime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[4U] = { 0U };
     uint64_t c[8U] = { 0U };
@@ -714,18 +713,18 @@ exp_vartime_precomp(
     reduction(n, mu, c, aM);
     uint64_t resM[4U] = { 0U };
     uint64_t ctx[8U] = { 0U };
-    memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
+    memcpy(ctx, n, 4U * sizeof (uint64_t));
+    memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -734,7 +733,7 @@ exp_vartime_precomp(
       amont_sqr(ctx_n0, mu, aM, aM);
     }
     uint64_t tmp[8U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)4U * sizeof (uint64_t));
+    memcpy(tmp, resM, 4U * sizeof (uint64_t));
     reduction(n, mu, tmp, res);
     return;
   }
@@ -744,74 +743,70 @@ exp_vartime_precomp(
   reduction(n, mu, c, aM);
   uint64_t resM[4U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[8U] = { 0U };
-  memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(ctx, n, 4U * sizeof (uint64_t));
+  memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
   uint64_t table[64U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)4U;
+  uint64_t *t1 = table + 4U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)4U;
+  uint64_t *ctx_r20 = ctx + 4U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(t1, aM, 4U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)4U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 4U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U;
+    memcpy(table + (2U * i + 2U) * 4U, tmp, 4U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 4U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 4U, tmp, 4U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
     uint32_t bits_l32 = (uint32_t)bits_c;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)4U;
-    memcpy(resM, (uint64_t *)a_bits_l, (uint32_t)4U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 4U;
+    memcpy(resM, (uint64_t *)a_bits_l, 4U * sizeof (uint64_t));
   }
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[4U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
     uint32_t bits_l32 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)4U;
-    memcpy(tmp0, (uint64_t *)a_bits_l, (uint32_t)4U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 4U;
+    memcpy(tmp0, (uint64_t *)a_bits_l, 4U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint64_t tmp1[8U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp1, resM, 4U * sizeof (uint64_t));
   reduction(n, mu, tmp1, res);
 }
 
@@ -826,7 +821,7 @@ exp_consttime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[4U] = { 0U };
     uint64_t c[8U] = { 0U };
@@ -834,24 +829,24 @@ exp_consttime_precomp(
     reduction(n, mu, c, aM);
     uint64_t resM[4U] = { 0U };
     uint64_t ctx[8U] = { 0U };
-    memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    memcpy(ctx, n, 4U * sizeof (uint64_t));
+    memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]);
+        0U,
+        4U,
+        1U,
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;);
       uint64_t *ctx_n0 = ctx;
@@ -862,14 +857,14 @@ exp_consttime_precomp(
     }
     uint64_t sw0 = sw;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]);
+      0U,
+      4U,
+      1U,
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;);
     uint64_t tmp[8U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)4U * sizeof (uint64_t));
+    memcpy(tmp, resM, 4U * sizeof (uint64_t));
     reduction(n, mu, tmp, res);
     return;
   }
@@ -879,56 +874,52 @@ exp_consttime_precomp(
   reduction(n, mu, c0, aM);
   uint64_t resM[4U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[8U] = { 0U };
-  memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(ctx, n, 4U * sizeof (uint64_t));
+  memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
   uint64_t table[64U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)4U;
+  uint64_t *t1 = table + 4U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)4U;
+  uint64_t *ctx_r20 = ctx + 4U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(t1, aM, 4U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)4U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 4U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U;
+    memcpy(table + (2U * i + 2U) * 4U, tmp, 4U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 4U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 4U, tmp, 4U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint64_t *)table, (uint32_t)4U * sizeof (uint64_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+    memcpy(resM, (uint64_t *)table, 4U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)4U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 4U;
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *os = resM;
         uint64_t x = (c & res_j[i]) | (~c & resM[i]);
         os[i] = x;););
@@ -936,31 +927,31 @@ exp_consttime_precomp(
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[4U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)4U * sizeof (uint64_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 4U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)4U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 4U;
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
@@ -968,7 +959,7 @@ exp_consttime_precomp(
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint64_t tmp1[8U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp1, resM, 4U * sizeof (uint64_t));
   reduction(n, mu, tmp1, res);
 }
 
@@ -1034,17 +1025,16 @@ Hacl_Bignum256_mod_exp_vartime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
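
A usage sketch for the wrapper above. Its public prototype is not visible in this hunk; the sketch assumes the shape `bool Hacl_Bignum256_mod_exp_vartime(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b, uint64_t *res)` implied by the `exp_check(n, a, bBits, b)` call. On a failed check the function returns false and zeroes `res`:

    #include <stdbool.h>
    #include "Hacl_Bignum256.h"   /* assumed public header */

    uint64_t n[4U]   = { 13ULL, 0ULL, 0ULL, 0ULL };  /* odd modulus, n > 1 */
    uint64_t a[4U]   = { 2ULL,  0ULL, 0ULL, 0ULL };  /* base, a < n */
    uint64_t b[4U]   = { 5ULL,  0ULL, 0ULL, 0ULL };  /* exponent, fits in bBits bits */
    uint64_t res[4U] = { 0U };
    bool ok = Hacl_Bignum256_mod_exp_vartime(n, a, 3U, b, res);
    /* ok is true and res[0] == 6, since 2^5 mod 13 = 32 mod 13 = 6 */

The `_consttime` variant right below takes the same arguments and is the one to use when the exponent is secret.
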
 
 /**
@@ -1077,17 +1067,16 @@ Hacl_Bignum256_mod_exp_consttime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -1108,67 +1097,66 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum256_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[4U] = { 0U };
-  memset(one, 0U, (uint32_t)4U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
+  memset(one, 0U, 4U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m1 = acc0;
   uint64_t m00 = m0 & m1;
   uint64_t bn_zero[4U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
   uint64_t res10 = mask1;
   uint64_t m10 = res10;
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m2 = acc;
   uint64_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t n2[4U] = { 0U };
-    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2);
-    uint64_t *a1 = n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2);
+    uint64_t *a1 = n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
     KRML_MAYBE_FOR3(i,
-      (uint32_t)0U,
-      (uint32_t)3U,
-      (uint32_t)1U,
+      0U,
+      3U,
+      1U,
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
     uint64_t c1 = c;
     uint64_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)256U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 256U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
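
A companion sketch for the inversion above. The runtime checks require `n` odd and greater than 1 and `0 < a < n`; correctness additionally relies on `n` being prime, since the code computes `a^(n-2) mod n`. On a failed check the function returns false and zeroes `res`:

    uint64_t n[4U]   = { 13ULL, 0ULL, 0ULL, 0ULL };  /* prime modulus */
    uint64_t a[4U]   = { 2ULL,  0ULL, 0ULL, 0ULL };
    uint64_t inv[4U] = { 0U };
    bool ok = Hacl_Bignum256_mod_inv_prime_vartime(n, a, inv);
    /* ok is true and inv[0] == 7, since 2 * 7 = 14 = 1 (mod 13) */
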
 
 
@@ -1192,17 +1180,15 @@ Heap-allocate and initialize a Montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_Bignum256_mont_ctx_init(uint64_t *n)
 {
-  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint64_t));
-  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint64_t));
+  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC(4U, sizeof (uint64_t));
+  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC(4U, sizeof (uint64_t));
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)4U * sizeof (uint64_t));
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
+  memcpy(n11, n, 4U * sizeof (uint64_t));
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
   precompr2(nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
-  res = { .len = (uint32_t)4U, .n = n11, .mu = mu, .r2 = r21 };
+  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = 4U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *)KRML_HOST_MALLOC(sizeof (
@@ -1330,21 +1316,21 @@ Hacl_Bignum256_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k;
   uint64_t n2[4U] = { 0U };
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
-  uint64_t *a1 = k1.n + (uint32_t)1U;
-  uint64_t *res1 = n2 + (uint32_t)1U;
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
+  uint64_t *a1 = k1.n + 1U;
+  uint64_t *res1 = n2 + 1U;
   uint64_t c = c0;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
+    0U,
+    3U,
+    1U,
     uint64_t t1 = a1[i];
     uint64_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
   uint64_t c1 = c;
   uint64_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)256U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 256U, n2, res);
 }
 
 
@@ -1366,36 +1352,28 @@ Load a big-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum256_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res2;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -1415,36 +1393,28 @@ Load a little-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum256_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++)
   {
     uint64_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)8U;
+    uint8_t *bj = tmp + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r1 = u;
     uint64_t x = r1;
@@ -1462,12 +1432,8 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum256_bn_to_bytes_be(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(res + i * (uint32_t)8U, b[(uint32_t)4U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(res + i * 8U, b[4U - i - 1U]););
 }
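
A round-trip sketch tying the byte-level helpers together. The buffer returned by `Hacl_Bignum256_new_bn_from_bytes_be` is allocated with `KRML_HOST_CALLOC`, which is plain `calloc` unless overridden, so releasing it with `free` is assumed here:

    #include <stdlib.h>

    uint8_t be[32U] = { 0U };
    be[31U] = 0x2AU;                                          /* the value 42, big-endian */
    uint64_t *bn = Hacl_Bignum256_new_bn_from_bytes_be(32U, be);
    if (bn != NULL)
    {
      uint8_t out[32U] = { 0U };
      Hacl_Bignum256_bn_to_bytes_be(bn, out);                 /* out matches be again */
      free(bn);
    }
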
 
 /**
@@ -1479,12 +1445,8 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum256_bn_to_bytes_le(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(res + i * (uint32_t)8U, b[i]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(res + i * 8U, b[i]););
 }
 
 
@@ -1500,14 +1462,14 @@ Returns 2^64 - 1 if a < b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum256_lt_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   return acc;
 }
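
The comparison above returns a full-width mask rather than a boolean, so its result composes directly with the branchless select idiom used throughout this file:

    uint64_t a[4U] = { 5ULL, 0ULL, 0ULL, 0ULL };
    uint64_t b[4U] = { 9ULL, 0ULL, 0ULL, 0ULL };
    uint64_t m = Hacl_Bignum256_lt_mask(a, b);      /* all ones here, since a < b */
    uint64_t lo = (m & a[0U]) | (~m & b[0U]);       /* selects a[0] when a < b, b[0] otherwise */
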
 
@@ -1518,11 +1480,11 @@ Returns 2^64 - 1 if a = b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum256_eq_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
diff --git a/src/Hacl_Bignum256_32.c b/src/Hacl_Bignum256_32.c
index ada15309..993dbf84 100644
--- a/src/Hacl_Bignum256_32.c
+++ b/src/Hacl_Bignum256_32.c
@@ -60,26 +60,26 @@ Write `a + b mod 2^256` in `res`.
 */
 uint32_t Hacl_Bignum256_32_add(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i););
   return c;
 }
@@ -93,26 +93,26 @@ Write `a - b mod 2^256` in `res`.
 */
 uint32_t Hacl_Bignum256_32_sub(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i););
   return c;
 }
@@ -129,56 +129,56 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum256_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t12, t2, res_i););
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i););
   uint32_t c1 = c;
   uint32_t c2 = c00 - c1;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -196,57 +196,57 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum256_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t12, t2, res_i););
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i););
   uint32_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint32_t c2 = (uint32_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t c2 = 0U - c00;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x;);
@@ -260,32 +260,32 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum256_32_mul(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)16U * sizeof (uint32_t));
+  memset(res, 0U, 16U * sizeof (uint32_t));
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t bj = b[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
+    uint32_t c = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = a[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = a[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c, res_i0);
-      uint32_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = a[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c, res_i1);
-      uint32_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = a[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c, res_i2);
-      uint32_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = a[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c, res_i););
     uint32_t r = c;
-    res[(uint32_t)8U + i0] = r;);
+    res[8U + i0] = r;);
 }
 
 /**
@@ -296,31 +296,31 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum256_32_sqr(uint32_t *a, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)16U * sizeof (uint32_t));
+  memset(res, 0U, 16U * sizeof (uint32_t));
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *ab = a;
     uint32_t a_j = a[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint32_t a_i = ab[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = ab[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i0);
-      uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = ab[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c, res_i1);
-      uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = ab[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c, res_i2);
-      uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = ab[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint32_t a_i = ab[i];
       uint32_t *res_i = res_j + i;
@@ -328,29 +328,29 @@ void Hacl_Bignum256_32_sqr(uint32_t *a, uint32_t *res)
     }
     uint32_t r = c;
     res[i0 + i0] = r;);
-  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint32_t tmp[16U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t res1 = (uint64_t)a[i] * (uint64_t)a[i];
-    uint32_t hi = (uint32_t)(res1 >> (uint32_t)32U);
+    uint32_t hi = (uint32_t)(res1 >> 32U);
     uint32_t lo = (uint32_t)res1;
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
-  uint32_t i = nBits / (uint32_t)32U;
-  uint32_t j = nBits % (uint32_t)32U;
-  res[i] = res[i] | (uint32_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)512U - nBits; i0++)
+  memset(res, 0U, 8U * sizeof (uint32_t));
+  uint32_t i = nBits / 32U;
+  uint32_t j = nBits % 32U;
+  res[i] = res[i] | 1U << j;
+  for (uint32_t i0 = 0U; i0 < 512U - nBits; i0++)
   {
     Hacl_Bignum256_32_add_mod(n, res, res, res);
   }
@@ -358,65 +358,65 @@ static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 
 static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
+    uint32_t c1 = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i););
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)8U + i0;
-    uint32_t res_j = c[(uint32_t)8U + i0];
+    uint32_t *resb = c + 8U + i0;
+    uint32_t res_j = c[8U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(res, c + 8U, 8U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
-  uint32_t c1 = (uint32_t)0U;
+  uint32_t c1 = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t12, t2, res_i););
   uint32_t c10 = c1;
   uint32_t c2 = c00 - c10;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -425,51 +425,51 @@ static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *
 static inline void from(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *a)
 {
   uint32_t tmp[16U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp, aM, 8U * sizeof (uint32_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
+    uint32_t c1 = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i););
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)8U + i0;
-    uint32_t res_j = c[(uint32_t)8U + i0];
+    uint32_t *resb = c + 8U + i0;
+    uint32_t res_j = c[8U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(res, c + 8U, 8U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
   uint32_t c1 = Hacl_Bignum256_32_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (m & tmp[i]) | (~m & res[i]);
     os[i] = x;);
@@ -479,63 +479,63 @@ static inline void
 amont_mul(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *bM, uint32_t *resM)
 {
   uint32_t c[16U] = { 0U };
-  memset(c, 0U, (uint32_t)16U * sizeof (uint32_t));
+  memset(c, 0U, 16U * sizeof (uint32_t));
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t bj = bM[i0];
     uint32_t *res_j = c + i0;
-    uint32_t c1 = (uint32_t)0U;
+    uint32_t c1 = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = aM[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = aM[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c1, res_i0);
-      uint32_t a_i0 = aM[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = aM[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c1, res_i1);
-      uint32_t a_i1 = aM[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = aM[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c1, res_i2);
-      uint32_t a_i2 = aM[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = aM[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c1, res_i););
     uint32_t r = c1;
-    c[(uint32_t)8U + i0] = r;);
+    c[8U + i0] = r;);
   areduction(n, nInv_u64, c, resM);
 }
 
 static inline void amont_sqr(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *resM)
 {
   uint32_t c[16U] = { 0U };
-  memset(c, 0U, (uint32_t)16U * sizeof (uint32_t));
+  memset(c, 0U, 16U * sizeof (uint32_t));
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *ab = aM;
     uint32_t a_j = aM[i0];
     uint32_t *res_j = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint32_t a_i = ab[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = ab[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c1, res_i0);
-      uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = ab[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c1, res_i1);
-      uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = ab[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c1, res_i2);
-      uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = ab[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c1, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint32_t a_i = ab[i];
       uint32_t *res_i = res_j + i;
@@ -543,20 +543,20 @@ static inline void amont_sqr(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint3
     }
     uint32_t r = c1;
     c[i0 + i0] = r;);
-  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, c, c, c);
-  KRML_HOST_IGNORE(c0);
+  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, c, c, c);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint32_t tmp[16U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t res = (uint64_t)aM[i] * (uint64_t)aM[i];
-    uint32_t hi = (uint32_t)(res >> (uint32_t)32U);
+    uint32_t hi = (uint32_t)(res >> 32U);
     uint32_t lo = (uint32_t)res;
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, c, tmp, c);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, c, tmp, c);
+  KRML_MAYBE_UNUSED_VAR(c1);
   areduction(n, nInv_u64, c, resM);
 }
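
(Illustrative note, not part of the patch: the conditional copies in add_mod, sub_mod, reduction and areduction above never branch on secret data; a 0/1 carry is first widened into an all-zeros or all-ones mask and the result limbs are then selected bitwise. A minimal standalone sketch of that idiom, with hypothetical helper names ct_mask_u32 / ct_select_u32:)

#include <stdint.h>

/* Turn a 0/1 carry into a 0/all-ones mask, as in `uint32_t c2 = 0U - c00;`. */
static inline uint32_t ct_mask_u32(uint32_t bit)
{
  return 0U - bit;                    /* 0U -> 0x00000000, 1U -> 0xFFFFFFFF */
}

/* Constant-time select: x when mask is all ones, y when mask is zero.
   Mirrors the `(c2 & res[i]) | (~c2 & tmp[i])` pattern used above. */
static inline uint32_t ct_select_u32(uint32_t mask, uint32_t x, uint32_t y)
{
  return (mask & x) | (~mask & y);
}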
 
@@ -565,46 +565,46 @@ bn_slow_precomp(uint32_t *n, uint32_t mu, uint32_t *r2, uint32_t *a, uint32_t *r
 {
   uint32_t a_mod[8U] = { 0U };
   uint32_t a1[16U] = { 0U };
-  memcpy(a1, a, (uint32_t)16U * sizeof (uint32_t));
-  uint32_t c0 = (uint32_t)0U;
+  memcpy(a1, a, 16U * sizeof (uint32_t));
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t qj = mu * a1[i0];
     uint32_t *res_j0 = a1 + i0;
-    uint32_t c = (uint32_t)0U;
+    uint32_t c = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i););
     uint32_t r = c;
     uint32_t c1 = r;
-    uint32_t *resb = a1 + (uint32_t)8U + i0;
-    uint32_t res_j = a1[(uint32_t)8U + i0];
+    uint32_t *resb = a1 + 8U + i0;
+    uint32_t res_j = a1[8U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c1, res_j, resb););
-  memcpy(a_mod, a1 + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(a_mod, a1 + 8U, 8U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
   uint32_t c1 = Hacl_Bignum256_32_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = a_mod;
     uint32_t x = (m & tmp[i]) | (~m & a_mod[i]);
     os[i] = x;);
@@ -627,22 +627,22 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum256_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[8U] = { 0U };
-  memset(one, 0U, (uint32_t)8U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
+  memset(one, 0U, 8U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m1 = acc;
   uint32_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t r2[8U] = { 0U };
     precompr2(nBits, n, r2);
@@ -651,68 +651,68 @@ bool Hacl_Bignum256_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b)
 {
   uint32_t one[8U] = { 0U };
-  memset(one, 0U, (uint32_t)8U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
+  memset(one, 0U, 8U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m10 = acc0;
   uint32_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t m1;
-  if (bBits < (uint32_t)32U * bLen)
+  if (bBits < 32U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), bLen);
     uint32_t b2[bLen];
     memset(b2, 0U, bLen * sizeof (uint32_t));
-    uint32_t i0 = bBits / (uint32_t)32U;
-    uint32_t j = bBits % (uint32_t)32U;
-    b2[i0] = b2[i0] | (uint32_t)1U << j;
-    uint32_t acc = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 32U;
+    uint32_t j = bBits % 32U;
+    b2[i0] = b2[i0] | 1U << j;
+    uint32_t acc = 0U;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]);
       uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
     }
     uint32_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint32_t)0xFFFFFFFFU;
+    m1 = 0xFFFFFFFFU;
   }
-  uint32_t acc = (uint32_t)0U;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m2 = acc;
   uint32_t m = m1 & m2;
   return m00 & m;
@@ -729,7 +729,7 @@ exp_vartime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[8U] = { 0U };
     uint32_t c[16U] = { 0U };
@@ -737,18 +737,18 @@ exp_vartime_precomp(
     reduction(n, mu, c, aM);
     uint32_t resM[8U] = { 0U };
     uint32_t ctx[16U] = { 0U };
-    memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(ctx, n, 8U * sizeof (uint32_t));
+    memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -757,7 +757,7 @@ exp_vartime_precomp(
       amont_sqr(ctx_n0, mu, aM, aM);
     }
     uint32_t tmp[16U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(tmp, resM, 8U * sizeof (uint32_t));
     reduction(n, mu, tmp, res);
     return;
   }
@@ -767,74 +767,70 @@ exp_vartime_precomp(
   reduction(n, mu, c, aM);
   uint32_t resM[8U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[16U] = { 0U };
-  memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(ctx, n, 8U * sizeof (uint32_t));
+  memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
   uint32_t table[128U] = { 0U };
   uint32_t tmp[8U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)8U;
+  uint32_t *t1 = table + 8U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)8U;
+  uint32_t *ctx_r20 = ctx + 8U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(t1, aM, 8U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)8U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 8U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U;
+    memcpy(table + (2U * i + 2U) * 8U, tmp, 8U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 8U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 8U, tmp, 8U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
     uint32_t bits_l32 = bits_c;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)8U;
-    memcpy(resM, (uint32_t *)a_bits_l, (uint32_t)8U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 8U;
+    memcpy(resM, (uint32_t *)a_bits_l, 8U * sizeof (uint32_t));
   }
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[8U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
     uint32_t bits_l32 = bits_l;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)8U;
-    memcpy(tmp0, (uint32_t *)a_bits_l, (uint32_t)8U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 8U;
+    memcpy(tmp0, (uint32_t *)a_bits_l, 8U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint32_t tmp1[16U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp1, resM, 8U * sizeof (uint32_t));
   reduction(n, mu, tmp1, res);
 }
 
@@ -849,7 +845,7 @@ exp_consttime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[8U] = { 0U };
     uint32_t c[16U] = { 0U };
@@ -857,24 +853,24 @@ exp_consttime_precomp(
     reduction(n, mu, c, aM);
     uint32_t resM[8U] = { 0U };
     uint32_t ctx[16U] = { 0U };
-    memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    memcpy(ctx, n, 8U * sizeof (uint32_t));
+    memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
       KRML_MAYBE_FOR8(i,
-        (uint32_t)0U,
-        (uint32_t)8U,
-        (uint32_t)1U,
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]);
+        0U,
+        8U,
+        1U,
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;);
       uint32_t *ctx_n0 = ctx;
@@ -885,14 +881,14 @@ exp_consttime_precomp(
     }
     uint32_t sw0 = sw;
     KRML_MAYBE_FOR8(i,
-      (uint32_t)0U,
-      (uint32_t)8U,
-      (uint32_t)1U,
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]);
+      0U,
+      8U,
+      1U,
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;);
     uint32_t tmp[16U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(tmp, resM, 8U * sizeof (uint32_t));
     reduction(n, mu, tmp, res);
     return;
   }
@@ -902,56 +898,52 @@ exp_consttime_precomp(
   reduction(n, mu, c0, aM);
   uint32_t resM[8U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[16U] = { 0U };
-  memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(ctx, n, 8U * sizeof (uint32_t));
+  memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
   uint32_t table[128U] = { 0U };
   uint32_t tmp[8U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)8U;
+  uint32_t *t1 = table + 8U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)8U;
+  uint32_t *ctx_r20 = ctx + 8U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(t1, aM, 8U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)8U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 8U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U;
+    memcpy(table + (2U * i + 2U) * 8U, tmp, 8U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 8U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 8U, tmp, 8U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint32_t *)table, (uint32_t)8U * sizeof (uint32_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+    memcpy(resM, (uint32_t *)table, 8U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)8U;
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 8U;
       KRML_MAYBE_FOR8(i,
-        (uint32_t)0U,
-        (uint32_t)8U,
-        (uint32_t)1U,
+        0U,
+        8U,
+        1U,
         uint32_t *os = resM;
         uint32_t x = (c & res_j[i]) | (~c & resM[i]);
         os[i] = x;););
@@ -959,31 +951,31 @@ exp_consttime_precomp(
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[8U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint32_t *)table, (uint32_t)8U * sizeof (uint32_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
+    memcpy(tmp0, (uint32_t *)table, 8U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)8U;
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 8U;
       KRML_MAYBE_FOR8(i,
-        (uint32_t)0U,
-        (uint32_t)8U,
-        (uint32_t)1U,
+        0U,
+        8U,
+        1U,
         uint32_t *os = tmp0;
         uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
@@ -991,7 +983,7 @@ exp_consttime_precomp(
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint32_t tmp1[16U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp1, resM, 8U * sizeof (uint32_t));
   reduction(n, mu, tmp1, res);
 }
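
(Illustrative note, not part of the patch: exp_consttime_precomp above chooses between the two Montgomery-form operands by XOR-swapping them under a mask derived from the current exponent bit, so neither the branch pattern nor the memory access pattern depends on the secret exponent. A standalone sketch with the hypothetical helper name ct_cswap_u32:)

#include <stdint.h>

/* Conditionally swap x and y when bit == 1, without branching on bit.
   Mirrors the `dummy = (0U - sw1) & (resM[i] ^ aM[i])` pattern in
   exp_consttime_precomp above. */
static inline void ct_cswap_u32(uint32_t bit, uint32_t *x, uint32_t *y)
{
  uint32_t mask = 0U - bit;           /* 0 or 0xFFFFFFFF */
  uint32_t dummy = mask & (*x ^ *y);  /* x ^ y when swapping, else 0 */
  *x = *x ^ dummy;
  *y = *y ^ dummy;
}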
 
@@ -1057,16 +1049,16 @@ Hacl_Bignum256_32_mod_exp_vartime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -1099,16 +1091,16 @@ Hacl_Bignum256_32_mod_exp_consttime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -1129,80 +1121,80 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum256_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[8U] = { 0U };
-  memset(one, 0U, (uint32_t)8U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
+  memset(one, 0U, 8U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m1 = acc0;
   uint32_t m00 = m0 & m1;
   uint32_t bn_zero[8U] = { 0U };
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
+  uint32_t mask = 0xFFFFFFFFU;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint32_t mask1 = mask;
   uint32_t res10 = mask1;
   uint32_t m10 = res10;
-  uint32_t acc = (uint32_t)0U;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m2 = acc;
   uint32_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t n2[8U] = { 0U };
-    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2);
-    uint32_t *a1 = n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2);
+    uint32_t *a1 = n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
     {
-      uint32_t t1 = a1[(uint32_t)4U * (uint32_t)0U];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * (uint32_t)0U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * 0U];
+      uint32_t *res_i0 = res1 + 4U * 0U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * 0U + 1U];
+      uint32_t *res_i1 = res1 + 4U * 0U + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * 0U + 2U];
+      uint32_t *res_i2 = res1 + 4U * 0U + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * 0U + 3U];
+      uint32_t *res_i = res1 + 4U * 0U + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
     KRML_MAYBE_FOR3(i,
-      (uint32_t)4U,
-      (uint32_t)7U,
-      (uint32_t)1U,
+      4U,
+      7U,
+      1U,
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
     uint32_t c1 = c;
     uint32_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)256U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 256U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 
@@ -1226,16 +1218,15 @@ Heap-allocate and initialize a Montgomery context.

 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum256_32_mont_ctx_init(uint32_t *n)
 {
-  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
-  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
+  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)8U * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
+  memcpy(n11, n, 8U * sizeof (uint32_t));
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
   precompr2(nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
-  res = { .len = (uint32_t)8U, .n = n11, .mu = mu, .r2 = r21 };
+  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res = { .len = 8U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *)KRML_HOST_MALLOC(sizeof (
@@ -1363,35 +1354,35 @@ Hacl_Bignum256_32_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k;
   uint32_t n2[8U] = { 0U };
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
-  uint32_t *a1 = k1.n + (uint32_t)1U;
-  uint32_t *res1 = n2 + (uint32_t)1U;
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
+  uint32_t *a1 = k1.n + 1U;
+  uint32_t *res1 = n2 + 1U;
   uint32_t c = c0;
   {
-    uint32_t t1 = a1[(uint32_t)4U * (uint32_t)0U];
-    uint32_t *res_i0 = res1 + (uint32_t)4U * (uint32_t)0U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-    uint32_t t10 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint32_t *res_i1 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-    uint32_t t11 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint32_t *res_i2 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-    uint32_t t12 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint32_t *res_i = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+    uint32_t t1 = a1[4U * 0U];
+    uint32_t *res_i0 = res1 + 4U * 0U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+    uint32_t t10 = a1[4U * 0U + 1U];
+    uint32_t *res_i1 = res1 + 4U * 0U + 1U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+    uint32_t t11 = a1[4U * 0U + 2U];
+    uint32_t *res_i2 = res1 + 4U * 0U + 2U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+    uint32_t t12 = a1[4U * 0U + 3U];
+    uint32_t *res_i = res1 + 4U * 0U + 3U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
   }
   KRML_MAYBE_FOR3(i,
-    (uint32_t)4U,
-    (uint32_t)7U,
-    (uint32_t)1U,
+    4U,
+    7U,
+    1U,
     uint32_t t1 = a1[i];
     uint32_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
   uint32_t c1 = c;
   uint32_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)256U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 256U, n2, res);
 }
 
 
@@ -1413,36 +1404,28 @@ Load a big-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint32_t *os = res2;
-    uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U);
+    uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U);
     uint32_t x = u;
     os[i] = x;
   }
@@ -1462,36 +1445,28 @@ Load a little-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++)
   {
     uint32_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)4U;
+    uint8_t *bj = tmp + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r1 = u;
     uint32_t x = r1;
@@ -1509,12 +1484,8 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum256_32_bn_to_bytes_be(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_be(res + i * (uint32_t)4U, b[(uint32_t)8U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(res + i * 4U, b[8U - i - 1U]););
 }
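A minimal caller sketch for Hacl_Bignum256_32_bn_to_bytes_be, assuming the 8-limb, least-significant-limb-first representation used by the loop above (the values are hypothetical):

uint32_t bn[8U] = { 1U };                    /* 256-bit bignum with value 1; limb 0 is least significant */
uint8_t out[32U] = { 0U };
Hacl_Bignum256_32_bn_to_bytes_be(bn, out);   /* big-endian: out[31] == 1, all other bytes 0 */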
 
 /**
@@ -1526,12 +1497,8 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum256_32_bn_to_bytes_le(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_le(res + i * (uint32_t)4U, b[i]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_le(res + i * 4U, b[i]););
 }
 
 
@@ -1547,14 +1514,14 @@ Returns 2^32 - 1 if a < b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum256_32_lt_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t acc = (uint32_t)0U;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   return acc;
 }
 
@@ -1565,11 +1532,11 @@ Returns 2^32 - 1 if a = b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum256_32_eq_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
+  uint32_t mask = 0xFFFFFFFFU;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;);
   uint32_t mask1 = mask;
diff --git a/src/Hacl_Bignum32.c b/src/Hacl_Bignum32.c
index a9bb4986..3daa3d33 100644
--- a/src/Hacl_Bignum32.c
+++ b/src/Hacl_Bignum32.c
@@ -105,9 +105,9 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum32_mul(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, b, tmp, res);
 }
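A minimal caller sketch for Hacl_Bignum32_mul; the 4-limb size and values are hypothetical, and the result buffer is assumed to hold 2 * len limbs:

uint32_t a[4U] = { 2U, 0U, 0U, 0U };
uint32_t b[4U] = { 3U, 0U, 0U, 0U };
uint32_t res[8U] = { 0U };              /* the product of two len-limb inputs is assumed to need 2 * len limbs */
Hacl_Bignum32_mul(4U, a, b, res);       /* res[0] == 6, higher limbs remain 0 */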
 
@@ -119,9 +119,9 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum32_sqr(uint32_t len, uint32_t *a, uint32_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, a, tmp, res);
 }
 
@@ -142,28 +142,28 @@ bn_slow_precomp(
   uint32_t a1[len + len];
   memset(a1, 0U, (len + len) * sizeof (uint32_t));
   memcpy(a1, a, (len + len) * sizeof (uint32_t));
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint32_t qj = mu * a1[i0];
     uint32_t *res_j0 = a1 + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint32_t a_i = n[i];
       uint32_t *res_i = res_j0 + i;
@@ -181,9 +181,9 @@ bn_slow_precomp(
   uint32_t tmp0[len];
   memset(tmp0, 0U, len * sizeof (uint32_t));
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len, a_mod, n, tmp0);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t *os = a_mod;
     uint32_t x = (m & tmp0[i]) | (~m & a_mod[i]);
@@ -192,9 +192,9 @@ bn_slow_precomp(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a_mod, r2, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, res);
 }
@@ -216,20 +216,20 @@ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res)
   uint32_t one[len];
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc;
   uint32_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t r2[len];
@@ -242,7 +242,7 @@ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res)
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
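A minimal caller sketch for Hacl_Bignum32_mod, assuming `a` spans 2 * len limbs as in bn_slow_precomp above; the modulus and values are hypothetical:

uint32_t n[4U] = { 7U, 0U, 0U, 0U };                      /* n must be odd and greater than 1 */
uint32_t a[8U] = { 20U, 0U, 0U, 0U, 0U, 0U, 0U, 0U };     /* 2 * len limbs */
uint32_t res[4U] = { 0U };
bool ok = Hacl_Bignum32_mod(4U, n, a, res);               /* ok == true, res[0] == 6 (20 mod 7) */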
 
 /**
@@ -276,8 +276,8 @@ Hacl_Bignum32_mod_exp_vartime(
 )
 {
   uint32_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len, nBits, n, a, bBits, b, res);
   }
@@ -285,7 +285,7 @@ Hacl_Bignum32_mod_exp_vartime(
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -319,8 +319,8 @@ Hacl_Bignum32_mod_exp_consttime(
 )
 {
   uint32_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u32(len, nBits, n, a, bBits, b, res);
   }
@@ -328,7 +328,7 @@ Hacl_Bignum32_mod_exp_consttime(
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -353,23 +353,23 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a,
   uint32_t one[len];
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc0;
   uint32_t m00 = m0 & m1;
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t bn_zero[len];
   memset(bn_zero, 0U, len * sizeof (uint32_t));
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -377,53 +377,48 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a,
   uint32_t mask1 = mask;
   uint32_t res10 = mask1;
   uint32_t m10 = res10;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t n2[len];
     memset(n2, 0U, len * sizeof (uint32_t));
-    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2);
+    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2);
     uint32_t c1;
-    if ((uint32_t)1U < len)
+    if (1U < len)
     {
-      uint32_t *a1 = n + (uint32_t)1U;
-      uint32_t *res1 = n2 + (uint32_t)1U;
+      uint32_t *a1 = n + 1U;
+      uint32_t *res1 = n2 + 1U;
       uint32_t c = c0;
-      for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U; i++)
+      for (uint32_t i = 0U; i < (len - 1U) / 4U; i++)
       {
-        uint32_t t1 = a1[(uint32_t)4U * i];
-        uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-        uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-        uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-        uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-        uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-        uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-        uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+        uint32_t t1 = a1[4U * i];
+        uint32_t *res_i0 = res1 + 4U * i;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+        uint32_t t10 = a1[4U * i + 1U];
+        uint32_t *res_i1 = res1 + 4U * i + 1U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+        uint32_t t11 = a1[4U * i + 2U];
+        uint32_t *res_i2 = res1 + 4U * i + 2U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+        uint32_t t12 = a1[4U * i + 3U];
+        uint32_t *res_i = res1 + 4U * i + 3U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
       }
-      for
-      (uint32_t
-        i = (len - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-        i
-        < len - (uint32_t)1U;
-        i++)
+      for (uint32_t i = (len - 1U) / 4U * 4U; i < len - 1U; i++)
       {
         uint32_t t1 = a1[i];
         uint32_t *res_i = res1 + i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i);
       }
       uint32_t c10 = c;
       c1 = c10;
@@ -432,20 +427,14 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a,
     {
       c1 = c0;
     }
-    KRML_HOST_IGNORE(c1);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len,
-      nBits,
-      n,
-      a,
-      (uint32_t)32U * len,
-      n2,
-      res);
+    KRML_MAYBE_UNUSED_VAR(c1);
+    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len, nBits, n, a, 32U * len, n2, res);
   }
   else
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 
@@ -477,7 +466,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -632,38 +621,33 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t n2[len1];
   memset(n2, 0U, len1 * sizeof (uint32_t));
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
   uint32_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint32_t *a1 = k1.n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t *a1 = k1.n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint32_t t1 = a1[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i);
     }
     uint32_t c10 = c;
     c1 = c10;
@@ -672,13 +656,13 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(len1,
     k1.n,
     k1.mu,
     k1.r2,
     a,
-    (uint32_t)32U * len1,
+    32U * len1,
     n2,
     res);
 }
@@ -702,36 +686,28 @@ Load a big-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum32_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint32_t *os = res2;
-    uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U);
+    uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U);
     uint32_t x = u;
     os[i] = x;
   }
@@ -751,36 +727,28 @@ Load a little-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum32_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++)
   {
     uint32_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)4U;
+    uint8_t *bj = tmp + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r1 = u;
     uint32_t x = r1;
@@ -797,14 +765,14 @@ Serialize a bignum into big-endian memory.
 */
 void Hacl_Bignum32_bn_to_bytes_be(uint32_t len, uint32_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store32_be(tmp + i * (uint32_t)4U, b[bnLen - i - (uint32_t)1U]);
+    store32_be(tmp + i * 4U, b[bnLen - i - 1U]);
   }
   memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t));
 }
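A round-trip sketch combining Hacl_Bignum32_new_bn_from_bytes_be and Hacl_Bignum32_bn_to_bytes_be; the 16-byte length is hypothetical, and freeing with free() assumes the default KRML_HOST_CALLOC mapping to calloc:

uint8_t in[16U] = { 0U };
in[15U] = 42U;                                                 /* big-endian value 42 */
uint32_t *bn = Hacl_Bignum32_new_bn_from_bytes_be(16U, in);    /* 4 heap-allocated limbs, or NULL */
if (bn != NULL)
{
  uint8_t out[16U] = { 0U };
  Hacl_Bignum32_bn_to_bytes_be(16U, bn, out);                  /* out[15] == 42 */
  free(bn);                                                    /* assumes the calloc-backed default allocator */
}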
@@ -817,14 +785,14 @@ Serialize a bignum into little-endian memory.
 */
 void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store32_le(tmp + i * (uint32_t)4U, b[i]);
+    store32_le(tmp + i * 4U, b[i]);
   }
   memcpy(res, tmp, len * sizeof (uint8_t));
 }
@@ -842,12 +810,12 @@ Returns 2^32 - 1 if a < b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b)
 {
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   return acc;
 }
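A minimal caller sketch for the constant-time comparators declared here and just below; the 4-limb operands are hypothetical:

uint32_t x[4U] = { 5U, 0U, 0U, 0U };
uint32_t y[4U] = { 9U, 0U, 0U, 0U };
uint32_t lt = Hacl_Bignum32_lt_mask(4U, x, y);   /* 0xFFFFFFFFU, since x < y */
uint32_t eq = Hacl_Bignum32_eq_mask(4U, x, y);   /* 0U, since x != y */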
@@ -859,8 +827,8 @@ Returns 2^32 - 1 if a = b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum32_eq_mask(uint32_t len, uint32_t *a, uint32_t *b)
 {
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
diff --git a/src/Hacl_Bignum4096.c b/src/Hacl_Bignum4096.c
index bf8fd6d2..c03696b8 100644
--- a/src/Hacl_Bignum4096.c
+++ b/src/Hacl_Bignum4096.c
@@ -63,26 +63,26 @@ Write `a + b mod 2^4096` in `res`.
 */
 uint64_t Hacl_Bignum4096_add(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i););
   return c;
 }
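A minimal caller sketch for Hacl_Bignum4096_add over its fixed 64-limb operands; the values are hypothetical:

uint64_t a[64U] = { 0U };
uint64_t b[64U] = { 0U };
uint64_t res[64U] = { 0U };
a[0U] = 0xFFFFFFFFFFFFFFFFULL;
b[0U] = 1ULL;
uint64_t carry = Hacl_Bignum4096_add(a, b, res);   /* res[0] == 0, res[1] == 1, carry == 0 */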
@@ -96,26 +96,26 @@ Write `a - b mod 2^4096` in `res`.
 */
 uint64_t Hacl_Bignum4096_sub(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i););
   return c;
 }
@@ -132,53 +132,53 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum4096_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i););
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i););
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -198,54 +198,54 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum4096_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i););
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i););
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -262,7 +262,7 @@ Write `a * b` in `res`.
 void Hacl_Bignum4096_mul(uint64_t *a, uint64_t *b, uint64_t *res)
 {
   uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64((uint32_t)64U, a, b, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(64U, a, b, tmp, res);
 }
 
 /**
@@ -274,16 +274,16 @@ Write `a * a` in `res`.
 void Hacl_Bignum4096_sqr(uint64_t *a, uint64_t *res)
 {
   uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64((uint32_t)64U, a, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(64U, a, tmp, res);
 }
 
 static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
-  uint32_t i = nBits / (uint32_t)64U;
-  uint32_t j = nBits % (uint32_t)64U;
-  res[i] = res[i] | (uint64_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)8192U - nBits; i0++)
+  memset(res, 0U, 64U * sizeof (uint64_t));
+  uint32_t i = nBits / 64U;
+  uint32_t j = nBits % 64U;
+  res[i] = res[i] | 1ULL << j;
+  for (uint32_t i0 = 0U; i0 < 8192U - nBits; i0++)
   {
     Hacl_Bignum4096_add_mod(n, res, res, res);
   }
@@ -291,61 +291,61 @@ static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 
 static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      16U,
+      1U,
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i););
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)64U + i0;
-    uint64_t res_j = c[(uint32_t)64U + i0];
+    uint64_t *resb = c + 64U + i0;
+    uint64_t res_j = c[64U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(res, c + 64U, 64U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
-  uint64_t c1 = (uint64_t)0U;
+  uint64_t c1 = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t12, t2, res_i););
   uint64_t c10 = c1;
   uint64_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -356,47 +356,47 @@ static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *
 static inline void from(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *a)
 {
   uint64_t tmp[128U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(tmp, aM, 64U * sizeof (uint64_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      16U,
+      1U,
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i););
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)64U + i0;
-    uint64_t res_j = c[(uint32_t)64U + i0];
+    uint64_t *resb = c + 64U + i0;
+    uint64_t res_j = c[64U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(res, c + 64U, 64U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
   uint64_t c1 = Hacl_Bignum4096_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (m & tmp[i]) | (~m & res[i]);
@@ -409,7 +409,7 @@ amont_mul(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *bM, uint64_t *
 {
   uint64_t c[128U] = { 0U };
   uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64((uint32_t)64U, aM, bM, tmp, c);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(64U, aM, bM, tmp, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -417,7 +417,7 @@ static inline void amont_sqr(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint6
 {
   uint64_t c[128U] = { 0U };
   uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64((uint32_t)64U, aM, tmp, c);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(64U, aM, tmp, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -426,42 +426,42 @@ bn_slow_precomp(uint64_t *n, uint64_t mu, uint64_t *r2, uint64_t *a, uint64_t *r
 {
   uint64_t a_mod[64U] = { 0U };
   uint64_t a1[128U] = { 0U };
-  memcpy(a1, a, (uint32_t)128U * sizeof (uint64_t));
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  memcpy(a1, a, 128U * sizeof (uint64_t));
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
     uint64_t qj = mu * a1[i0];
     uint64_t *res_j0 = a1 + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      16U,
+      1U,
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i););
     uint64_t r = c;
     uint64_t c1 = r;
-    uint64_t *resb = a1 + (uint32_t)64U + i0;
-    uint64_t res_j = a1[(uint32_t)64U + i0];
+    uint64_t *resb = a1 + 64U + i0;
+    uint64_t res_j = a1[64U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb);
   }
-  memcpy(a_mod, a1 + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(a_mod, a1 + 64U, 64U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
   uint64_t c1 = Hacl_Bignum4096_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = a_mod;
     uint64_t x = (m & tmp[i]) | (~m & a_mod[i]);
@@ -486,22 +486,21 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum4096_mod(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[64U] = { 0U };
-  memset(one, 0U, (uint32_t)64U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memset(one, 0U, 64U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   uint64_t is_valid_m = m0 & m1;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t r2[64U] = { 0U };
     precompr2(nBits, n, r2);
@@ -510,65 +509,65 @@ bool Hacl_Bignum4096_mod(uint64_t *n, uint64_t *a, uint64_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
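A minimal caller sketch for Hacl_Bignum4096_mod, assuming `a` spans 128 limbs as in bn_slow_precomp above; the modulus and values are hypothetical:

uint64_t n[64U] = { 0U };
uint64_t a[128U] = { 0U };
uint64_t res[64U] = { 0U };
n[0U] = 13ULL;                                  /* n must be odd and greater than 1 */
a[0U] = 30ULL;
bool ok = Hacl_Bignum4096_mod(n, a, res);       /* ok == true, res[0] == 4 (30 mod 13) */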
 
 static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b)
 {
   uint64_t one[64U] = { 0U };
-  memset(one, 0U, (uint32_t)64U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memset(one, 0U, 64U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m10 = acc0;
   uint64_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t m1;
-  if (bBits < (uint32_t)64U * bLen)
+  if (bBits < 64U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
     uint64_t b2[bLen];
     memset(b2, 0U, bLen * sizeof (uint64_t));
-    uint32_t i0 = bBits / (uint32_t)64U;
-    uint32_t j = bBits % (uint32_t)64U;
-    b2[i0] = b2[i0] | (uint64_t)1U << j;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 64U;
+    uint32_t j = bBits % 64U;
+    b2[i0] = b2[i0] | 1ULL << j;
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    m1 = 0xFFFFFFFFFFFFFFFFULL;
   }
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t m = m1 & m2;
@@ -586,7 +585,7 @@ exp_vartime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[64U] = { 0U };
     uint64_t c[128U] = { 0U };
@@ -594,18 +593,18 @@ exp_vartime_precomp(
     reduction(n, mu, c, aM);
     uint64_t resM[64U] = { 0U };
     uint64_t ctx[128U] = { 0U };
-    memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
+    memcpy(ctx, n, 64U * sizeof (uint64_t));
+    memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -614,7 +613,7 @@ exp_vartime_precomp(
       amont_sqr(ctx_n0, mu, aM, aM);
     }
     uint64_t tmp[128U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)64U * sizeof (uint64_t));
+    memcpy(tmp, resM, 64U * sizeof (uint64_t));
     reduction(n, mu, tmp, res);
     return;
   }
@@ -624,74 +623,70 @@ exp_vartime_precomp(
   reduction(n, mu, c, aM);
   uint64_t resM[64U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[128U] = { 0U };
-  memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(ctx, n, 64U * sizeof (uint64_t));
+  memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
   uint64_t table[1024U] = { 0U };
   uint64_t tmp[64U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)64U;
+  uint64_t *t1 = table + 64U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)64U;
+  uint64_t *ctx_r20 = ctx + 64U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(t1, aM, 64U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)64U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 64U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U;
+    memcpy(table + (2U * i + 2U) * 64U, tmp, 64U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 64U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 64U, tmp, 64U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
     uint32_t bits_l32 = (uint32_t)bits_c;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)64U;
-    memcpy(resM, (uint64_t *)a_bits_l, (uint32_t)64U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 64U;
+    memcpy(resM, (uint64_t *)a_bits_l, 64U * sizeof (uint64_t));
   }
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[64U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
     uint32_t bits_l32 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)64U;
-    memcpy(tmp0, (uint64_t *)a_bits_l, (uint32_t)64U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 64U;
+    memcpy(tmp0, (uint64_t *)a_bits_l, 64U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint64_t tmp1[128U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(tmp1, resM, 64U * sizeof (uint64_t));
   reduction(n, mu, tmp1, res);
 }
 
@@ -706,7 +701,7 @@ exp_consttime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[64U] = { 0U };
     uint64_t c[128U] = { 0U };
@@ -714,22 +709,22 @@ exp_consttime_precomp(
     reduction(n, mu, c, aM);
     uint64_t resM[64U] = { 0U };
     uint64_t ctx[128U] = { 0U };
-    memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    memcpy(ctx, n, 64U * sizeof (uint64_t));
+    memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+      for (uint32_t i = 0U; i < 64U; i++)
       {
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -740,14 +735,14 @@ exp_consttime_precomp(
       sw = bit;
     }
     uint64_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+    for (uint32_t i = 0U; i < 64U; i++)
     {
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
     uint64_t tmp[128U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)64U * sizeof (uint64_t));
+    memcpy(tmp, resM, 64U * sizeof (uint64_t));
     reduction(n, mu, tmp, res);
     return;
   }
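
The ladder above avoids any secret-dependent branch by turning the swap decision into a mask. A minimal standalone sketch of the idiom, not part of the patch (ct_cswap_u64 is an illustrative name, not a library function):

#include <stdint.h>
#include <stddef.h>

/* Branch-free conditional swap: `bit` must be 0 or 1. When bit == 1 the two
   limb arrays are exchanged; when bit == 0 nothing changes. Either way the
   same instructions and memory accesses are executed. */
static void ct_cswap_u64(uint64_t bit, uint64_t *x, uint64_t *y, size_t len)
{
  uint64_t mask = 0ULL - bit;               /* 0x00...0 or 0xFF...F */
  for (size_t i = 0U; i < len; i++)
  {
    uint64_t dummy = mask & (x[i] ^ y[i]);  /* 0 when bit == 0 */
    x[i] ^= dummy;
    y[i] ^= dummy;
  }
}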
@@ -757,53 +752,49 @@ exp_consttime_precomp(
   reduction(n, mu, c0, aM);
   uint64_t resM[64U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[128U] = { 0U };
-  memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(ctx, n, 64U * sizeof (uint64_t));
+  memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
   uint64_t table[1024U] = { 0U };
   uint64_t tmp[64U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)64U;
+  uint64_t *t1 = table + 64U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)64U;
+  uint64_t *ctx_r20 = ctx + 64U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(t1, aM, 64U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)64U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 64U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U;
+    memcpy(table + (2U * i + 2U) * 64U, tmp, 64U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 64U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 64U, tmp, 64U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint64_t *)table, (uint32_t)64U * sizeof (uint64_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+    memcpy(resM, (uint64_t *)table, 64U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)64U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 64U;
+      for (uint32_t i = 0U; i < 64U; i++)
       {
         uint64_t *os = resM;
         uint64_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -813,28 +804,28 @@ exp_consttime_precomp(
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[64U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)64U * sizeof (uint64_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 64U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)64U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 64U;
+      for (uint32_t i = 0U; i < 64U; i++)
       {
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -844,7 +835,7 @@ exp_consttime_precomp(
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint64_t tmp1[128U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(tmp1, resM, 64U * sizeof (uint64_t));
   reduction(n, mu, tmp1, res);
 }
 
@@ -910,17 +901,16 @@ Hacl_Bignum4096_mod_exp_vartime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -953,17 +943,16 @@ Hacl_Bignum4096_mod_exp_consttime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -984,22 +973,22 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[64U] = { 0U };
-  memset(one, 0U, (uint32_t)64U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memset(one, 0U, 64U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc0;
   uint64_t m00 = m0 & m1;
   uint64_t bn_zero[64U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -1007,57 +996,56 @@ bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *r
   uint64_t mask1 = mask;
   uint64_t res10 = mask1;
   uint64_t m10 = res10;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t n2[64U] = { 0U };
-    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2);
-    uint64_t *a1 = n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2);
+    uint64_t *a1 = n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
     KRML_MAYBE_FOR15(i,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i););
+      0U,
+      15U,
+      1U,
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i););
     KRML_MAYBE_FOR3(i,
-      (uint32_t)60U,
-      (uint32_t)63U,
-      (uint32_t)1U,
+      60U,
+      63U,
+      1U,
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
     uint64_t c1 = c;
     uint64_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)4096U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 4096U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
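
The borrow chain above computes n2 = n - 2, and the subsequent exp_vartime call raises a to the power n - 2 modulo n; by Fermat's little theorem this equals a^(-1) mod n when n is prime, which is why the function is named mod_inv_prime. A toy, self-contained check of that identity (modpow is an illustrative helper for word-sized values, not a library function):

#include <stdint.h>
#include <assert.h>

/* Square-and-multiply exponentiation; p must be small enough that
   (p - 1)^2 fits in 64 bits. */
static uint64_t modpow(uint64_t a, uint64_t e, uint64_t p)
{
  uint64_t r = 1U % p;
  a %= p;
  while (e > 0U)
  {
    if ((e & 1U) != 0U) { r = (r * a) % p; }
    a = (a * a) % p;
    e >>= 1U;
  }
  return r;
}

int main(void)
{
  uint64_t p = 101U;                     /* prime modulus */
  uint64_t a = 7U;
  uint64_t inv = modpow(a, p - 2U, p);   /* a^(p-2) mod p */
  assert((a * inv) % p == 1U);           /* hence inv == a^(-1) mod p */
  return 0;
}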
 
 
@@ -1081,17 +1069,15 @@ Heap-allocate and initialize a montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_Bignum4096_mont_ctx_init(uint64_t *n)
 {
-  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint64_t));
-  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint64_t));
+  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC(64U, sizeof (uint64_t));
+  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC(64U, sizeof (uint64_t));
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)64U * sizeof (uint64_t));
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
+  memcpy(n11, n, 64U * sizeof (uint64_t));
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
   precompr2(nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
-  res = { .len = (uint32_t)64U, .n = n11, .mu = mu, .r2 = r21 };
+  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = 64U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *)KRML_HOST_MALLOC(sizeof (
@@ -1219,37 +1205,37 @@ Hacl_Bignum4096_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k;
   uint64_t n2[64U] = { 0U };
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
-  uint64_t *a1 = k1.n + (uint32_t)1U;
-  uint64_t *res1 = n2 + (uint32_t)1U;
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
+  uint64_t *a1 = k1.n + 1U;
+  uint64_t *res1 = n2 + 1U;
   uint64_t c = c0;
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t t1 = a1[(uint32_t)4U * i];
-    uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-    uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-    uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-    uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i););
+    0U,
+    15U,
+    1U,
+    uint64_t t1 = a1[4U * i];
+    uint64_t *res_i0 = res1 + 4U * i;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+    uint64_t t10 = a1[4U * i + 1U];
+    uint64_t *res_i1 = res1 + 4U * i + 1U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+    uint64_t t11 = a1[4U * i + 2U];
+    uint64_t *res_i2 = res1 + 4U * i + 2U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+    uint64_t t12 = a1[4U * i + 3U];
+    uint64_t *res_i = res1 + 4U * i + 3U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i););
   KRML_MAYBE_FOR3(i,
-    (uint32_t)60U,
-    (uint32_t)63U,
-    (uint32_t)1U,
+    60U,
+    63U,
+    1U,
     uint64_t t1 = a1[i];
     uint64_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
   uint64_t c1 = c;
   uint64_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)4096U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 4096U, n2, res);
 }
 
 
@@ -1271,36 +1257,28 @@ Load a big-endian bignum from memory.

 */
 uint64_t *Hacl_Bignum4096_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res2;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
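
The loop above turns a big-endian byte string into 64-bit limbs stored least-significant first: the input is left-padded to a multiple of 8 bytes and then read word by word from the back. A condensed sketch of the same conversion, assuming len > 0 and a caller-provided output buffer of (len - 1) / 8 + 1 limbs rather than the calloc performed by the function above (bn_from_bytes_be_sketch is an illustrative name):

#include <stdint.h>
#include <string.h>

static void bn_from_bytes_be_sketch(uint32_t len, const uint8_t *b, uint64_t *res)
{
  uint32_t bnLen = (len - 1U) / 8U + 1U;
  uint8_t tmp[8U * bnLen];                   /* VLA, as in the generated code */
  memset(tmp, 0U, 8U * bnLen);
  memcpy(tmp + 8U * bnLen - len, b, len);    /* left-pad with zero bytes */
  for (uint32_t i = 0U; i < bnLen; i++)
  {
    const uint8_t *p = tmp + (bnLen - i - 1U) * 8U;   /* last word first */
    res[i] =                                          /* load64_be by hand */
      ((uint64_t)p[0] << 56) | ((uint64_t)p[1] << 48) |
      ((uint64_t)p[2] << 40) | ((uint64_t)p[3] << 32) |
      ((uint64_t)p[4] << 24) | ((uint64_t)p[5] << 16) |
      ((uint64_t)p[6] << 8)  |  (uint64_t)p[7];
  }
}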
@@ -1320,36 +1298,28 @@ Load a little-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum4096_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++)
   {
     uint64_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)8U;
+    uint8_t *bj = tmp + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r1 = u;
     uint64_t x = r1;
@@ -1367,10 +1337,10 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum4096_bn_to_bytes_be(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 64U; i++)
   {
-    store64_be(res + i * (uint32_t)8U, b[(uint32_t)64U - i - (uint32_t)1U]);
+    store64_be(res + i * 8U, b[64U - i - 1U]);
   }
 }
 
@@ -1383,10 +1353,10 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum4096_bn_to_bytes_le(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 64U; i++)
   {
-    store64_le(res + i * (uint32_t)8U, b[i]);
+    store64_le(res + i * 8U, b[i]);
   }
 }
 
@@ -1403,12 +1373,12 @@ Returns 2^64 - 1 if a < b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum4096_lt_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   return acc;
 }
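
The comparison above never branches on the limbs: it scans from the least to the most significant limb, keeps the previous verdict whenever the limbs are equal, and otherwise overwrites it with the verdict for the current, more significant limb, so the final mask reflects the most significant differing limb. A sketch with explicit mask helpers (eq_mask_u64 and lt_mask_u64 stand in for FStar_UInt64_eq_mask and ~FStar_UInt64_gte_mask; they are illustrative, not the library functions):

#include <stdint.h>

static uint64_t eq_mask_u64(uint64_t a, uint64_t b)
{
  uint64_t x = a ^ b;                        /* 0 iff a == b */
  uint64_t nz = (x | (0ULL - x)) >> 63;      /* 1 iff x != 0 */
  return nz - 1ULL;                          /* all-ones iff a == b */
}

static uint64_t lt_mask_u64(uint64_t a, uint64_t b)
{
  /* sign bit of the borrow of a - b (Hacker's Delight style) */
  uint64_t ltbit = ((~a & b) | ((~a | b) & (a - b))) >> 63;
  return 0ULL - ltbit;                       /* all-ones iff a < b */
}

static uint64_t bn_lt_mask_sketch(const uint64_t *a, const uint64_t *b, uint32_t len)
{
  uint64_t acc = 0ULL;
  for (uint32_t i = 0U; i < len; i++)
  {
    uint64_t beq = eq_mask_u64(a[i], b[i]);
    uint64_t blt = lt_mask_u64(a[i], b[i]);
    acc = (beq & acc) | (~beq & blt);        /* keep on equality, else overwrite */
  }
  return acc;                                /* 2^64 - 1 if a < b, else 0 */
}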
@@ -1420,8 +1390,8 @@ Returns 2^64 - 1 if a = b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum4096_eq_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
diff --git a/src/Hacl_Bignum4096_32.c b/src/Hacl_Bignum4096_32.c
index 2f8d70f1..edc5c84b 100644
--- a/src/Hacl_Bignum4096_32.c
+++ b/src/Hacl_Bignum4096_32.c
@@ -64,24 +64,24 @@ Write `a + b mod 2^4096` in `res`.
 */
 uint32_t Hacl_Bignum4096_32_add(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
   return c;
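
The unrolled loop above is a plain carry chain: each call to Lib_IntTypes_Intrinsics_add_carry_u32 adds two limbs plus the incoming carry, stores the low 32 bits, and passes the carry on. An equivalent sketch without the intrinsic, using 64-bit arithmetic to expose the carry (bn_add_sketch is an illustrative name):

#include <stdint.h>

static uint32_t bn_add_sketch(const uint32_t *a, const uint32_t *b,
                              uint32_t *res, uint32_t len)
{
  uint32_t c = 0U;
  for (uint32_t i = 0U; i < len; i++)
  {
    uint64_t t = (uint64_t)a[i] + (uint64_t)b[i] + (uint64_t)c;
    res[i] = (uint32_t)t;                    /* low 32 bits of the sum */
    c = (uint32_t)(t >> 32);                 /* carry out: 0 or 1 */
  }
  return c;                                  /* set iff a + b >= 2^(32*len) */
}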
@@ -96,24 +96,24 @@ Write `a - b mod 2^4096` in `res`.
 */
 uint32_t Hacl_Bignum4096_32_sub(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
   return c;
@@ -131,51 +131,51 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum4096_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t12, t2, res_i);
   }
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
   uint32_t c1 = c;
   uint32_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -195,52 +195,52 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum4096_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t12, t2, res_i);
   }
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
   uint32_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint32_t c2 = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t c2 = 0U - c00;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -257,7 +257,7 @@ Write `a * b` in `res`.
 void Hacl_Bignum4096_32_mul(uint32_t *a, uint32_t *b, uint32_t *res)
 {
   uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32((uint32_t)128U, a, b, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(128U, a, b, tmp, res);
 }
 
 /**
@@ -269,16 +269,16 @@ Write `a * a` in `res`.
 void Hacl_Bignum4096_32_sqr(uint32_t *a, uint32_t *res)
 {
   uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32((uint32_t)128U, a, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(128U, a, tmp, res);
 }
 
 static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
-  uint32_t i = nBits / (uint32_t)32U;
-  uint32_t j = nBits % (uint32_t)32U;
-  res[i] = res[i] | (uint32_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)8192U - nBits; i0++)
+  memset(res, 0U, 128U * sizeof (uint32_t));
+  uint32_t i = nBits / 32U;
+  uint32_t j = nBits % 32U;
+  res[i] = res[i] | 1U << j;
+  for (uint32_t i0 = 0U; i0 < 8192U - nBits; i0++)
   {
     Hacl_Bignum4096_32_add_mod(n, res, res, res);
   }
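
precompr2 computes R^2 mod n for the Montgomery radix R = 2^4096: it seeds the result with 2^nBits, which for the odd moduli the API accepts is already below n, and then doubles modulo n a total of 8192 - nBits times, reaching 2^8192 = R^2 modulo n. A toy numeric check of the same recurrence with word-sized values (illustration only, not library code):

#include <stdint.h>
#include <assert.h>

int main(void)
{
  /* R = 2^8 plays the role of 2^4096; n = 201 is odd with 2^7 <= n < 2^8. */
  uint64_t n = 201U;
  uint64_t nBits = 7U;
  uint64_t r2 = 1ULL << nBits;                  /* 2^7 < n, already reduced */
  for (uint64_t i = 0U; i < 2U * 8U - nBits; i++)
  {
    r2 = (2U * r2) % n;                         /* add_mod(n, r2, r2, r2) */
  }
  assert(r2 == (1ULL << 16) % n);               /* r2 == R^2 mod n */
  return 0;
}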
@@ -286,59 +286,59 @@ static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 
 static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < 128U; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)128U + i0;
-    uint32_t res_j = c[(uint32_t)128U + i0];
+    uint32_t *resb = c + 128U + i0;
+    uint32_t res_j = c[128U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(res, c + 128U, 128U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
-  uint32_t c1 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c1 = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t12, t2, res_i);
   }
   uint32_t c10 = c1;
   uint32_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -349,46 +349,46 @@ static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *
 static inline void from(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *a)
 {
   uint32_t tmp[256U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(tmp, aM, 128U * sizeof (uint32_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < 128U; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)128U + i0;
-    uint32_t res_j = c[(uint32_t)128U + i0];
+    uint32_t *resb = c + 128U + i0;
+    uint32_t res_j = c[128U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(res, c + 128U, 128U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
   uint32_t c1 = Hacl_Bignum4096_32_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (m & tmp[i]) | (~m & res[i]);
@@ -401,7 +401,7 @@ amont_mul(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *bM, uint32_t *
 {
   uint32_t c[256U] = { 0U };
   uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32((uint32_t)128U, aM, bM, tmp, c);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(128U, aM, bM, tmp, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -409,7 +409,7 @@ static inline void amont_sqr(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint3
 {
   uint32_t c[256U] = { 0U };
   uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32((uint32_t)128U, aM, tmp, c);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(128U, aM, tmp, c);
   areduction(n, nInv_u64, c, resM);
 }
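
amont_mul and amont_sqr multiply two Montgomery-form operands into a 256-limb buffer with Karatsuba and then call areduction, a word-by-word Montgomery reduction (REDC): for each low limb it adds a multiple qj * n chosen so that limb cancels, then keeps the high half, i.e. it divides by R = 2^4096 modulo n. A toy single-limb REDC showing the same cancellation (illustration only; mont_redc32 is an illustrative name, and mu here is -n^(-1) mod 2^32, the role played by the nInv/mu parameter above):

#include <stdint.h>
#include <assert.h>

static uint32_t mont_redc32(uint64_t T, uint32_t n, uint32_t mu)
{
  uint32_t m = (uint32_t)T * mu;               /* m = -T * n^(-1) mod 2^32 */
  uint64_t t = (T + (uint64_t)m * n) >> 32;    /* low word cancels; divide by R */
  return (t >= n) ? (uint32_t)(t - n) : (uint32_t)t;  /* final subtraction */
}

int main(void)
{
  uint32_t n = 97U;                            /* odd modulus */
  uint32_t inv = 1U;                           /* Newton iteration: n^(-1) mod 2^32 */
  for (int i = 0; i < 5; i++) { inv *= 2U - n * inv; }
  uint32_t mu = 0U - inv;                      /* mu = -n^(-1) mod 2^32 */
  uint64_t a = 20U, b = 30U;
  uint32_t r = mont_redc32(a * b, n, mu);      /* r = a*b*R^(-1) mod n, R = 2^32 */
  assert(((uint64_t)r << 32) % n == (a * b) % n);
  return 0;
}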
 
@@ -418,41 +418,41 @@ bn_slow_precomp(uint32_t *n, uint32_t mu, uint32_t *r2, uint32_t *a, uint32_t *r
 {
   uint32_t a_mod[128U] = { 0U };
   uint32_t a1[256U] = { 0U };
-  memcpy(a1, a, (uint32_t)256U * sizeof (uint32_t));
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++)
+  memcpy(a1, a, 256U * sizeof (uint32_t));
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < 128U; i0++)
   {
     uint32_t qj = mu * a1[i0];
     uint32_t *res_j0 = a1 + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i);
     }
     uint32_t r = c;
     uint32_t c1 = r;
-    uint32_t *resb = a1 + (uint32_t)128U + i0;
-    uint32_t res_j = a1[(uint32_t)128U + i0];
+    uint32_t *resb = a1 + 128U + i0;
+    uint32_t res_j = a1[128U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c1, res_j, resb);
   }
-  memcpy(a_mod, a1 + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(a_mod, a1 + 128U, 128U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
   uint32_t c1 = Hacl_Bignum4096_32_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = a_mod;
     uint32_t x = (m & tmp[i]) | (~m & a_mod[i]);
@@ -477,21 +477,21 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum4096_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[128U] = { 0U };
-  memset(one, 0U, (uint32_t)128U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  memset(one, 0U, 128U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc;
   uint32_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t r2[128U] = { 0U };
     precompr2(nBits, n, r2);
@@ -500,65 +500,65 @@ bool Hacl_Bignum4096_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b)
 {
   uint32_t one[128U] = { 0U };
-  memset(one, 0U, (uint32_t)128U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  memset(one, 0U, 128U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m10 = acc0;
   uint32_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t m1;
-  if (bBits < (uint32_t)32U * bLen)
+  if (bBits < 32U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), bLen);
     uint32_t b2[bLen];
     memset(b2, 0U, bLen * sizeof (uint32_t));
-    uint32_t i0 = bBits / (uint32_t)32U;
-    uint32_t j = bBits % (uint32_t)32U;
-    b2[i0] = b2[i0] | (uint32_t)1U << j;
-    uint32_t acc = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 32U;
+    uint32_t j = bBits % 32U;
+    b2[i0] = b2[i0] | 1U << j;
+    uint32_t acc = 0U;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]);
       uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
     }
     uint32_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint32_t)0xFFFFFFFFU;
+    m1 = 0xFFFFFFFFU;
   }
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t m = m1 & m2;
@@ -576,7 +576,7 @@ exp_vartime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[128U] = { 0U };
     uint32_t c[256U] = { 0U };
@@ -584,18 +584,18 @@ exp_vartime_precomp(
     reduction(n, mu, c, aM);
     uint32_t resM[128U] = { 0U };
     uint32_t ctx[256U] = { 0U };
-    memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
+    memcpy(ctx, n, 128U * sizeof (uint32_t));
+    memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -604,7 +604,7 @@ exp_vartime_precomp(
       amont_sqr(ctx_n0, mu, aM, aM);
     }
     uint32_t tmp[256U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)128U * sizeof (uint32_t));
+    memcpy(tmp, resM, 128U * sizeof (uint32_t));
     reduction(n, mu, tmp, res);
     return;
   }
@@ -614,74 +614,70 @@ exp_vartime_precomp(
   reduction(n, mu, c, aM);
   uint32_t resM[128U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[256U] = { 0U };
-  memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(ctx, n, 128U * sizeof (uint32_t));
+  memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
   uint32_t table[2048U] = { 0U };
   uint32_t tmp[128U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)128U;
+  uint32_t *t1 = table + 128U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)128U;
+  uint32_t *ctx_r20 = ctx + 128U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(t1, aM, 128U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)128U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 128U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U;
+    memcpy(table + (2U * i + 2U) * 128U, tmp, 128U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 128U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 128U, tmp, 128U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
     uint32_t bits_l32 = bits_c;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)128U;
-    memcpy(resM, (uint32_t *)a_bits_l, (uint32_t)128U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 128U;
+    memcpy(resM, (uint32_t *)a_bits_l, 128U * sizeof (uint32_t));
   }
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[128U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
     uint32_t bits_l32 = bits_l;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)128U;
-    memcpy(tmp0, (uint32_t *)a_bits_l, (uint32_t)128U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 128U;
+    memcpy(tmp0, (uint32_t *)a_bits_l, 128U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint32_t tmp1[256U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(tmp1, resM, 128U * sizeof (uint32_t));
   reduction(n, mu, tmp1, res);
 }
 
@@ -696,7 +692,7 @@ exp_consttime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[128U] = { 0U };
     uint32_t c[256U] = { 0U };
@@ -704,22 +700,22 @@ exp_consttime_precomp(
     reduction(n, mu, c, aM);
     uint32_t resM[128U] = { 0U };
     uint32_t ctx[256U] = { 0U };
-    memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    memcpy(ctx, n, 128U * sizeof (uint32_t));
+    memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+      for (uint32_t i = 0U; i < 128U; i++)
       {
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -730,14 +726,14 @@ exp_consttime_precomp(
       sw = bit;
     }
     uint32_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+    for (uint32_t i = 0U; i < 128U; i++)
     {
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
     uint32_t tmp[256U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)128U * sizeof (uint32_t));
+    memcpy(tmp, resM, 128U * sizeof (uint32_t));
     reduction(n, mu, tmp, res);
     return;
   }
@@ -747,53 +743,49 @@ exp_consttime_precomp(
   reduction(n, mu, c0, aM);
   uint32_t resM[128U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[256U] = { 0U };
-  memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(ctx, n, 128U * sizeof (uint32_t));
+  memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
   uint32_t table[2048U] = { 0U };
   uint32_t tmp[128U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)128U;
+  uint32_t *t1 = table + 128U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)128U;
+  uint32_t *ctx_r20 = ctx + 128U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(t1, aM, 128U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)128U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 128U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U;
+    memcpy(table + (2U * i + 2U) * 128U, tmp, 128U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 128U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 128U, tmp, 128U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint32_t *)table, (uint32_t)128U * sizeof (uint32_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+    memcpy(resM, (uint32_t *)table, 128U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)128U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 128U;
+      for (uint32_t i = 0U; i < 128U; i++)
       {
         uint32_t *os = resM;
         uint32_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -803,28 +795,28 @@ exp_consttime_precomp(
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[128U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint32_t *)table, (uint32_t)128U * sizeof (uint32_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
+    memcpy(tmp0, (uint32_t *)table, 128U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)128U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 128U;
+      for (uint32_t i = 0U; i < 128U; i++)
       {
         uint32_t *os = tmp0;
         uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -834,7 +826,7 @@ exp_consttime_precomp(
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint32_t tmp1[256U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(tmp1, resM, 128U * sizeof (uint32_t));
   reduction(n, mu, tmp1, res);
 }
 
@@ -900,16 +892,16 @@ Hacl_Bignum4096_32_mod_exp_vartime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -942,16 +934,16 @@ Hacl_Bignum4096_32_mod_exp_consttime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -972,22 +964,22 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[128U] = { 0U };
-  memset(one, 0U, (uint32_t)128U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  memset(one, 0U, 128U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc0;
   uint32_t m00 = m0 & m1;
   uint32_t bn_zero[128U] = { 0U };
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -995,55 +987,55 @@ bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t
   uint32_t mask1 = mask;
   uint32_t res10 = mask1;
   uint32_t m10 = res10;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t n2[128U] = { 0U };
-    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2);
-    uint32_t *a1 = n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2);
+    uint32_t *a1 = n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)31U; i++)
+    for (uint32_t i = 0U; i < 31U; i++)
     {
-      uint32_t t1 = a1[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
     KRML_MAYBE_FOR3(i,
-      (uint32_t)124U,
-      (uint32_t)127U,
-      (uint32_t)1U,
+      124U,
+      127U,
+      1U,
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
     uint32_t c1 = c;
     uint32_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)4096U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 4096U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 
@@ -1067,16 +1059,16 @@ Heap-allocate and initialize a montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum4096_32_mont_ctx_init(uint32_t *n)
 {
-  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint32_t));
-  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint32_t));
+  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC(128U, sizeof (uint32_t));
+  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC(128U, sizeof (uint32_t));
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)128U * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
+  memcpy(n11, n, 128U * sizeof (uint32_t));
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
   precompr2(nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
-  res = { .len = (uint32_t)128U, .n = n11, .mu = mu, .r2 = r21 };
+  res = { .len = 128U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *)KRML_HOST_MALLOC(sizeof (
@@ -1204,36 +1196,36 @@ Hacl_Bignum4096_32_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k;
   uint32_t n2[128U] = { 0U };
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
-  uint32_t *a1 = k1.n + (uint32_t)1U;
-  uint32_t *res1 = n2 + (uint32_t)1U;
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
+  uint32_t *a1 = k1.n + 1U;
+  uint32_t *res1 = n2 + 1U;
   uint32_t c = c0;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)31U; i++)
+  for (uint32_t i = 0U; i < 31U; i++)
   {
-    uint32_t t1 = a1[(uint32_t)4U * i];
-    uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-    uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-    uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-    uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+    uint32_t t1 = a1[4U * i];
+    uint32_t *res_i0 = res1 + 4U * i;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+    uint32_t t10 = a1[4U * i + 1U];
+    uint32_t *res_i1 = res1 + 4U * i + 1U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+    uint32_t t11 = a1[4U * i + 2U];
+    uint32_t *res_i2 = res1 + 4U * i + 2U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+    uint32_t t12 = a1[4U * i + 3U];
+    uint32_t *res_i = res1 + 4U * i + 3U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
   }
   KRML_MAYBE_FOR3(i,
-    (uint32_t)124U,
-    (uint32_t)127U,
-    (uint32_t)1U,
+    124U,
+    127U,
+    1U,
     uint32_t t1 = a1[i];
     uint32_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
   uint32_t c1 = c;
   uint32_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)4096U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 4096U, n2, res);
 }
 
 
@@ -1255,36 +1247,28 @@ Load a big-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint32_t *os = res2;
-    uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U);
+    uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U);
     uint32_t x = u;
     os[i] = x;
   }
@@ -1304,36 +1288,28 @@ Load a little-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++)
   {
     uint32_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)4U;
+    uint8_t *bj = tmp + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r1 = u;
     uint32_t x = r1;
@@ -1351,10 +1327,10 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum4096_32_bn_to_bytes_be(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 128U; i++)
   {
-    store32_be(res + i * (uint32_t)4U, b[(uint32_t)128U - i - (uint32_t)1U]);
+    store32_be(res + i * 4U, b[128U - i - 1U]);
   }
 }
 
@@ -1367,10 +1343,10 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum4096_32_bn_to_bytes_le(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 128U; i++)
   {
-    store32_le(res + i * (uint32_t)4U, b[i]);
+    store32_le(res + i * 4U, b[i]);
   }
 }
 
@@ -1387,12 +1363,12 @@ Returns 2^32 - 1 if a < b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum4096_32_lt_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   return acc;
 }
@@ -1404,8 +1380,8 @@ Returns 2^32 - 1 if a = b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum4096_32_eq_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
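A minimal caller-side sketch of the fixed-width Hacl_Bignum4096_32 byte API exercised above (load from big-endian bytes, modular inverse, serialize), assuming the usual Hacl_Bignum4096_32.h declarations and that buffers returned by new_bn_from_bytes_be may be released with free(), which holds for the default KRML_HOST_CALLOC:

#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include "Hacl_Bignum4096_32.h"

/* Sketch: compute a^(-1) mod n for 4096-bit inputs given as 512-byte big-endian
   strings; writes 512 bytes to out and returns false if the inputs are rejected
   (n even, n <= 1, a = 0 or a >= n, per the masks computed in the hunks above). */
static bool bn4096_mod_inv_be(uint8_t n_bytes[512], uint8_t a_bytes[512], uint8_t out[512])
{
  uint32_t *n = Hacl_Bignum4096_32_new_bn_from_bytes_be(512U, n_bytes);
  uint32_t *a = Hacl_Bignum4096_32_new_bn_from_bytes_be(512U, a_bytes);
  bool ok = false;
  if (n != NULL && a != NULL)
  {
    uint32_t res[128U] = { 0U };                   /* 128 x 32-bit limbs = 4096 bits */
    ok = Hacl_Bignum4096_32_mod_inv_prime_vartime(n, a, res);
    if (ok)
    {
      Hacl_Bignum4096_32_bn_to_bytes_be(res, out); /* big-endian serialization */
    }
  }
  free(a);
  free(n);
  return ok;
}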
diff --git a/src/Hacl_Bignum64.c b/src/Hacl_Bignum64.c
index 7300a993..343b30f0 100644
--- a/src/Hacl_Bignum64.c
+++ b/src/Hacl_Bignum64.c
@@ -104,9 +104,9 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum64_mul(uint32_t len, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, b, tmp, res);
 }
 
@@ -118,9 +118,9 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum64_sqr(uint32_t len, uint64_t *a, uint64_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, a, tmp, res);
 }
 
@@ -141,28 +141,28 @@ bn_slow_precomp(
   uint64_t a1[len + len];
   memset(a1, 0U, (len + len) * sizeof (uint64_t));
   memcpy(a1, a, (len + len) * sizeof (uint64_t));
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint64_t qj = mu * a1[i0];
     uint64_t *res_j0 = a1 + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = n[i];
       uint64_t *res_i = res_j0 + i;
@@ -180,9 +180,9 @@ bn_slow_precomp(
   uint64_t tmp0[len];
   memset(tmp0, 0U, len * sizeof (uint64_t));
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len, a_mod, n, tmp0);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t *os = a_mod;
     uint64_t x = (m & tmp0[i]) | (~m & a_mod[i]);
@@ -191,9 +191,9 @@ bn_slow_precomp(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t c[len + len];
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t tmp[(uint32_t)4U * len];
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t tmp[4U * len];
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a_mod, r2, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, res);
 }
@@ -215,20 +215,20 @@ bool Hacl_Bignum64_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res)
   uint64_t one[len];
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   uint64_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t r2[len];
@@ -241,7 +241,7 @@ bool Hacl_Bignum64_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res)
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -275,8 +275,8 @@ Hacl_Bignum64_mod_exp_vartime(
 )
 {
   uint64_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len, nBits, n, a, bBits, b, res);
   }
@@ -284,7 +284,7 @@ Hacl_Bignum64_mod_exp_vartime(
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -318,8 +318,8 @@ Hacl_Bignum64_mod_exp_consttime(
 )
 {
   uint64_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u64(len, nBits, n, a, bBits, b, res);
   }
@@ -327,7 +327,7 @@ Hacl_Bignum64_mod_exp_consttime(
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -352,23 +352,23 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a,
   uint64_t one[len];
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc0;
   uint64_t m00 = m0 & m1;
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t bn_zero[len];
   memset(bn_zero, 0U, len * sizeof (uint64_t));
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -376,53 +376,48 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a,
   uint64_t mask1 = mask;
   uint64_t res10 = mask1;
   uint64_t m10 = res10;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t n2[len];
     memset(n2, 0U, len * sizeof (uint64_t));
-    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2);
+    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2);
     uint64_t c1;
-    if ((uint32_t)1U < len)
+    if (1U < len)
     {
-      uint64_t *a1 = n + (uint32_t)1U;
-      uint64_t *res1 = n2 + (uint32_t)1U;
+      uint64_t *a1 = n + 1U;
+      uint64_t *res1 = n2 + 1U;
       uint64_t c = c0;
-      for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U; i++)
+      for (uint32_t i = 0U; i < (len - 1U) / 4U; i++)
       {
-        uint64_t t1 = a1[(uint32_t)4U * i];
-        uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-        uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-        uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-        uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-        uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-        uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-        uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+        uint64_t t1 = a1[4U * i];
+        uint64_t *res_i0 = res1 + 4U * i;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+        uint64_t t10 = a1[4U * i + 1U];
+        uint64_t *res_i1 = res1 + 4U * i + 1U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+        uint64_t t11 = a1[4U * i + 2U];
+        uint64_t *res_i2 = res1 + 4U * i + 2U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+        uint64_t t12 = a1[4U * i + 3U];
+        uint64_t *res_i = res1 + 4U * i + 3U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
       }
-      for
-      (uint32_t
-        i = (len - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-        i
-        < len - (uint32_t)1U;
-        i++)
+      for (uint32_t i = (len - 1U) / 4U * 4U; i < len - 1U; i++)
       {
         uint64_t t1 = a1[i];
         uint64_t *res_i = res1 + i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
       }
       uint64_t c10 = c;
       c1 = c10;
@@ -431,20 +426,14 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a,
     {
       c1 = c0;
     }
-    KRML_HOST_IGNORE(c1);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len,
-      nBits,
-      n,
-      a,
-      (uint32_t)64U * len,
-      n2,
-      res);
+    KRML_MAYBE_UNUSED_VAR(c1);
+    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len, nBits, n, a, 64U * len, n2, res);
   }
   else
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 
@@ -476,7 +465,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint64_t));
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -631,38 +620,33 @@ Hacl_Bignum64_mod_inv_prime_vartime_precomp(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t n2[len1];
   memset(n2, 0U, len1 * sizeof (uint64_t));
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
   uint64_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint64_t *a1 = k1.n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t *a1 = k1.n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c10 = c;
     c1 = c10;
@@ -671,13 +655,13 @@ Hacl_Bignum64_mod_inv_prime_vartime_precomp(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(len1,
     k1.n,
     k1.mu,
     k1.r2,
     a,
-    (uint32_t)64U * len1,
+    64U * len1,
     n2,
     res);
 }
@@ -701,36 +685,28 @@ Load a big-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum64_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res2;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -750,36 +726,28 @@ Load a little-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum64_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++)
   {
     uint64_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)8U;
+    uint8_t *bj = tmp + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r1 = u;
     uint64_t x = r1;
@@ -796,14 +764,14 @@ Serialize a bignum into big-endian memory.
 */
 void Hacl_Bignum64_bn_to_bytes_be(uint32_t len, uint64_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store64_be(tmp + i * (uint32_t)8U, b[bnLen - i - (uint32_t)1U]);
+    store64_be(tmp + i * 8U, b[bnLen - i - 1U]);
   }
   memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t));
 }
@@ -816,14 +784,14 @@ Serialize a bignum into little-endian memory.
 */
 void Hacl_Bignum64_bn_to_bytes_le(uint32_t len, uint64_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t tmp[tmpLen];
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store64_le(tmp + i * (uint32_t)8U, b[i]);
+    store64_le(tmp + i * 8U, b[i]);
   }
   memcpy(res, tmp, len * sizeof (uint8_t));
 }
@@ -841,12 +809,12 @@ Returns 2^64 - 1 if a < b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum64_lt_mask(uint32_t len, uint64_t *a, uint64_t *b)
 {
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   return acc;
 }
@@ -858,8 +826,8 @@ Returns 2^64 - 1 if a = b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum64_eq_mask(uint32_t len, uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
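The Hacl_Bignum64 hunks are the length-generic counterpart: `len` counts 64-bit limbs, the byte loaders take a byte count, and `mul` produces a double-width result. A hedged sketch under those assumptions (the 2*len-limb product buffer and the use of free() on the loader results are assumptions for illustration, matching the default KRML_HOST_CALLOC):

#include <stdlib.h>
#include <stdint.h>
#include "Hacl_Bignum64.h"

/* Sketch: multiply two big-endian numbers of nbytes bytes each (nbytes assumed to
   be a multiple of 8 so byte and limb counts line up) and write the 2*nbytes-byte
   big-endian product to out. Returns 1 on success, 0 on allocation failure. */
static int bn64_mul_be(uint32_t nbytes, uint8_t *a_bytes, uint8_t *b_bytes, uint8_t *out)
{
  uint32_t len = nbytes / 8U;                        /* uint64_t limbs per operand */
  uint64_t *a = Hacl_Bignum64_new_bn_from_bytes_be(nbytes, a_bytes);
  uint64_t *b = Hacl_Bignum64_new_bn_from_bytes_be(nbytes, b_bytes);
  uint64_t *res = (a != NULL && b != NULL) ? calloc(2U * len, sizeof (uint64_t)) : NULL;
  int ok = 0;
  if (res != NULL)
  {
    Hacl_Bignum64_mul(len, a, b, res);               /* product occupies 2*len limbs */
    Hacl_Bignum64_bn_to_bytes_be(2U * nbytes, res, out);
    ok = 1;
  }
  free(res);
  free(b);
  free(a);
  return ok;
}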
diff --git a/src/Hacl_Chacha20.c b/src/Hacl_Chacha20.c
index 8966e19e..38a5c373 100644
--- a/src/Hacl_Chacha20.c
+++ b/src/Hacl_Chacha20.c
@@ -28,7 +28,7 @@
 const
 uint32_t
 Hacl_Impl_Chacha20_Vec_chacha20_constants[4U] =
-  { (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
+  { 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U };
 
 static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
 {
@@ -37,7 +37,7 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std0 = st[d];
   uint32_t sta10 = sta + stb0;
   uint32_t std10 = std0 ^ sta10;
-  uint32_t std2 = std10 << (uint32_t)16U | std10 >> (uint32_t)16U;
+  uint32_t std2 = std10 << 16U | std10 >> 16U;
   st[a] = sta10;
   st[d] = std2;
   uint32_t sta0 = st[c];
@@ -45,7 +45,7 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std3 = st[b];
   uint32_t sta11 = sta0 + stb1;
   uint32_t std11 = std3 ^ sta11;
-  uint32_t std20 = std11 << (uint32_t)12U | std11 >> (uint32_t)20U;
+  uint32_t std20 = std11 << 12U | std11 >> 20U;
   st[c] = sta11;
   st[b] = std20;
   uint32_t sta2 = st[a];
@@ -53,7 +53,7 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std4 = st[d];
   uint32_t sta12 = sta2 + stb2;
   uint32_t std12 = std4 ^ sta12;
-  uint32_t std21 = std12 << (uint32_t)8U | std12 >> (uint32_t)24U;
+  uint32_t std21 = std12 << 8U | std12 >> 24U;
   st[a] = sta12;
   st[d] = std21;
   uint32_t sta3 = st[c];
@@ -61,21 +61,21 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std = st[b];
   uint32_t sta1 = sta3 + stb;
   uint32_t std1 = std ^ sta1;
-  uint32_t std22 = std1 << (uint32_t)7U | std1 >> (uint32_t)25U;
+  uint32_t std22 = std1 << 7U | std1 >> 25U;
   st[c] = sta1;
   st[b] = std22;
 }
 
 static inline void double_round(uint32_t *st)
 {
-  quarter_round(st, (uint32_t)0U, (uint32_t)4U, (uint32_t)8U, (uint32_t)12U);
-  quarter_round(st, (uint32_t)1U, (uint32_t)5U, (uint32_t)9U, (uint32_t)13U);
-  quarter_round(st, (uint32_t)2U, (uint32_t)6U, (uint32_t)10U, (uint32_t)14U);
-  quarter_round(st, (uint32_t)3U, (uint32_t)7U, (uint32_t)11U, (uint32_t)15U);
-  quarter_round(st, (uint32_t)0U, (uint32_t)5U, (uint32_t)10U, (uint32_t)15U);
-  quarter_round(st, (uint32_t)1U, (uint32_t)6U, (uint32_t)11U, (uint32_t)12U);
-  quarter_round(st, (uint32_t)2U, (uint32_t)7U, (uint32_t)8U, (uint32_t)13U);
-  quarter_round(st, (uint32_t)3U, (uint32_t)4U, (uint32_t)9U, (uint32_t)14U);
+  quarter_round(st, 0U, 4U, 8U, 12U);
+  quarter_round(st, 1U, 5U, 9U, 13U);
+  quarter_round(st, 2U, 6U, 10U, 14U);
+  quarter_round(st, 3U, 7U, 11U, 15U);
+  quarter_round(st, 0U, 5U, 10U, 15U);
+  quarter_round(st, 1U, 6U, 11U, 12U);
+  quarter_round(st, 2U, 7U, 8U, 13U);
+  quarter_round(st, 3U, 4U, 9U, 14U);
 }
 
 static inline void rounds(uint32_t *st)
@@ -94,14 +94,14 @@ static inline void rounds(uint32_t *st)
 
 static inline void chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t));
+  memcpy(k, ctx, 16U * sizeof (uint32_t));
   uint32_t ctr_u32 = ctr;
   k[12U] = k[12U] + ctr_u32;
   rounds(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = k;
     uint32_t x = k[i] + ctx[i];
     os[i] = x;);
@@ -110,35 +110,34 @@ static inline void chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 
 static const
 uint32_t
-chacha20_constants[4U] =
-  { (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
+chacha20_constants[4U] = { 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U };
 
 void Hacl_Impl_Chacha20_chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n, uint32_t ctr)
 {
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx;
     uint32_t x = chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -151,27 +150,23 @@ static void chacha20_encrypt_block(uint32_t *ctx, uint8_t *out, uint32_t incr, u
   chacha20_core(k, ctx, incr);
   uint32_t bl[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = bl;
-    uint8_t *bj = text + i * (uint32_t)4U;
+    uint8_t *bj = text + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = bl;
     uint32_t x = bl[i] ^ k[i];
     os[i] = x;);
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    store32_le(out + i * (uint32_t)4U, bl[i]););
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(out + i * 4U, bl[i]););
 }
 
 static inline void
@@ -186,16 +181,16 @@ chacha20_encrypt_last(uint32_t *ctx, uint32_t len, uint8_t *out, uint32_t incr,
 void
 Hacl_Impl_Chacha20_chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text)
 {
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    chacha20_encrypt_block(ctx, out + i * (uint32_t)64U, i, text + i * (uint32_t)64U);
+    chacha20_encrypt_block(ctx, out + i * 64U, i, text + i * 64U);
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    chacha20_encrypt_last(ctx, rem, out + nb * (uint32_t)64U, nb, text + nb * (uint32_t)64U);
+    chacha20_encrypt_last(ctx, rem, out + nb * 64U, nb, text + nb * 64U);
   }
 }
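For orientation on the scalar ChaCha20 hunks, a minimal sketch that drives the two routines whose signatures appear above; the 32-byte key and 12-byte nonce sizes are the standard ChaCha20 parameters, assumed here rather than restated by this patch, and most callers would go through the public one-shot API instead:

#include <stdint.h>

/* Declarations as they appear in the hunks above (non-static in Hacl_Chacha20.c). */
void Hacl_Impl_Chacha20_chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n, uint32_t ctr);
void Hacl_Impl_Chacha20_chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text);

/* Sketch: XOR len bytes of text into out with the ChaCha20 keystream for
   (key, nonce), starting at block counter ctr; encryption and decryption
   are the same operation. */
static void chacha20_xor_sketch(uint32_t len, uint8_t *out, uint8_t *text,
                                uint8_t key[32], uint8_t nonce[12], uint32_t ctr)
{
  uint32_t ctx[16U] = { 0U };  /* 4 constants, 8 key words, counter, 3 nonce words */
  Hacl_Impl_Chacha20_chacha20_init(ctx, key, nonce, ctr);
  Hacl_Impl_Chacha20_chacha20_update(ctx, len, out, text);
}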
 
diff --git a/src/Hacl_Chacha20_Vec128.c b/src/Hacl_Chacha20_Vec128.c
index 1e0c4ec1..deab1dfc 100644
--- a/src/Hacl_Chacha20_Vec128.c
+++ b/src/Hacl_Chacha20_Vec128.c
@@ -32,100 +32,100 @@ static inline void double_round_128(Lib_IntVector_Intrinsics_vec128 *st)
 {
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std0 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std0, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std0, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std1 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std1, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std1, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std2 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std2, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std2, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std3 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std3, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std3, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std4 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std4, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std4, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std5 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std5, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std5, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std6 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std6, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std6, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std7 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std7, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std7, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std8 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std8, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std8, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std9 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std9, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std9, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std10 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std10, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std10, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std11 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std11, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std11, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std12 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std12, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std12, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std13 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std13, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std13, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std14 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std14, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std14, 7U);
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std15 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std15, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std15, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std16 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std16, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std16, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std17 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std17, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std17, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std18 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std18, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std18, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std19 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std19, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std19, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std20 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std20, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std20, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std21 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std21, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std21, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std22 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std22, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std22, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std23 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std23, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std23, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std24 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std24, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std24, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std25 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std25, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std25, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std26 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std26, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std26, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std27 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std27, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std27, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std28 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std28, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std28, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std29 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std29, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std29, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std30 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std30, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std30, 7U);
 }
 
 static inline void
@@ -135,8 +135,8 @@ chacha20_core_128(
   uint32_t ctr
 )
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  uint32_t ctr_u32 = (uint32_t)4U * ctr;
+  memcpy(k, ctx, 16U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  uint32_t ctr_u32 = 4U * ctr;
   Lib_IntVector_Intrinsics_vec128 cv = Lib_IntVector_Intrinsics_vec128_load32(ctr_u32);
   k[12U] = Lib_IntVector_Intrinsics_vec128_add32(k[12U], cv);
   double_round_128(k);
@@ -150,9 +150,9 @@ chacha20_core_128(
   double_round_128(k);
   double_round_128(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = k;
     Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_add32(k[i], ctx[i]);
     os[i] = x;);
@@ -164,47 +164,42 @@ chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n,
 {
   uint32_t ctx1[16U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx1;
     uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx1 + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx1[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx1 + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = ctx;
     uint32_t x = ctx1[i];
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_load32(x);
     os[i] = x0;);
-  Lib_IntVector_Intrinsics_vec128
-  ctr1 =
-    Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)0U,
-      (uint32_t)1U,
-      (uint32_t)2U,
-      (uint32_t)3U);
+  Lib_IntVector_Intrinsics_vec128 ctr1 = Lib_IntVector_Intrinsics_vec128_load32s(0U, 1U, 2U, 3U);
   Lib_IntVector_Intrinsics_vec128 c12 = ctx[12U];
   ctx[12U] = Lib_IntVector_Intrinsics_vec128_add32(c12, ctr1);
 }
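
Note: chacha20_init_128 builds the usual 16-word ChaCha20 state (4 constants, 8 little-endian key words, 1 block counter, 3 little-endian nonce words), broadcasts each word across the four lanes of a vec128, and then adds the vector (0,1,2,3) to the counter lane so that lane l of the state encrypts block ctr + l. A scalar sketch of the state layout, assuming the RFC 8439 constants for Hacl_Impl_Chacha20_Vec_chacha20_constants (names and helpers here are illustrative):

#include <stdint.h>

/* "expa" "nd 3" "2-by" "te k" */
static const uint32_t chacha_constants[4U] =
  { 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U };

static uint32_t load32_le_(const uint8_t *b)
{
  return (uint32_t)b[0U] | (uint32_t)b[1U] << 8U
       | (uint32_t)b[2U] << 16U | (uint32_t)b[3U] << 24U;
}

static void chacha20_state(uint32_t st[16U], const uint8_t key[32U],
                           const uint8_t nonce[12U], uint32_t ctr)
{
  for (uint32_t i = 0U; i < 4U; i++) { st[i]       = chacha_constants[i]; }
  for (uint32_t i = 0U; i < 8U; i++) { st[4U + i]  = load32_le_(key + 4U * i); }
  st[12U] = ctr;                       /* 64-byte block counter */
  for (uint32_t i = 0U; i < 3U; i++) { st[13U + i] = load32_le_(nonce + 4U * i); }
}
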
@@ -221,13 +216,13 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[16U] KRML_POST_ALIGN(16) = { 0U };
   chacha20_init_128(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)256U;
-  uint32_t nb = len / (uint32_t)256U;
-  uint32_t rem1 = len % (uint32_t)256U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 256U;
+  uint32_t nb = len / 256U;
+  uint32_t rem1 = len % 256U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)256U;
-    uint8_t *uu____1 = text + i * (uint32_t)256U;
+    uint8_t *uu____0 = out + i * 256U;
+    uint8_t *uu____1 = text + i * 256U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, i);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -359,19 +354,19 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * 16U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)256U;
+    uint8_t *uu____2 = out + nb * 256U;
     uint8_t plain[256U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)256U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 256U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -503,13 +498,13 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * 16U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
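
Note: the 128-bit path consumes the message in 256-byte superblocks, i.e. four 64-byte ChaCha20 blocks per loop iteration. Because chacha20_init_128 seeds the counter lanes with ctr + (0,1,2,3) and chacha20_core_128 adds 4U * i for superblock i, message block b still uses the plain sequential counter ctr + b, so the output matches the scalar cipher byte for byte. A small sketch of that bookkeeping (illustrative helper, not library code):

#include <stdint.h>

/* 64-byte block counter used for message block b when processing
   `lanes` blocks per iteration (lanes = 4 for Vec128, 8 for Vec256) */
static uint32_t effective_counter(uint32_t ctr, uint32_t lanes, uint32_t b)
{
  uint32_t superblock = b / lanes;          /* loop index i */
  uint32_t lane = b % lanes;                /* position inside the vector */
  return ctr + lanes * superblock + lane;   /* == ctr + b */
}
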
@@ -526,13 +521,13 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[16U] KRML_POST_ALIGN(16) = { 0U };
   chacha20_init_128(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)256U;
-  uint32_t nb = len / (uint32_t)256U;
-  uint32_t rem1 = len % (uint32_t)256U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 256U;
+  uint32_t nb = len / 256U;
+  uint32_t rem1 = len % 256U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)256U;
-    uint8_t *uu____1 = cipher + i * (uint32_t)256U;
+    uint8_t *uu____0 = out + i * 256U;
+    uint8_t *uu____1 = cipher + i * 256U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, i);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -664,19 +659,19 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * 16U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)256U;
+    uint8_t *uu____2 = out + nb * 256U;
     uint8_t plain[256U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)256U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 256U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -808,13 +803,13 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * 16U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
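
Note: this translation unit needs 128-bit vector support at both compile time and run time; the Vec256 and Vec32 files below provide the 256-bit and portable variants of the same cipher, and all three produce identical output. A hedged sketch of how a caller might pick a variant on x86_64 with GCC or Clang; __builtin_cpu_supports is a compiler builtin, not part of HACL*, the (len, out, text, key, nonce, ctr) argument order is inferred from the function bodies above, and AVX/AVX2 are used here as rough proxies for the required features (EverCrypt's autoconfig does this selection properly in the real library):

#include <stdint.h>
#include "Hacl_Chacha20_Vec32.h"
#include "Hacl_Chacha20_Vec128.h"
#include "Hacl_Chacha20_Vec256.h"

static void chacha20_encrypt_auto(uint32_t len, uint8_t *out, uint8_t *text,
                                  uint8_t *key, uint8_t *nonce, uint32_t ctr)
{
  if (__builtin_cpu_supports("avx2"))
  {
    Hacl_Chacha20_Vec256_chacha20_encrypt_256(len, out, text, key, nonce, ctr);
  }
  else if (__builtin_cpu_supports("avx"))
  {
    Hacl_Chacha20_Vec128_chacha20_encrypt_128(len, out, text, key, nonce, ctr);
  }
  else
  {
    Hacl_Chacha20_Vec32_chacha20_encrypt_32(len, out, text, key, nonce, ctr);
  }
}
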
diff --git a/src/Hacl_Chacha20_Vec256.c b/src/Hacl_Chacha20_Vec256.c
index 620f5040..e61a7cfe 100644
--- a/src/Hacl_Chacha20_Vec256.c
+++ b/src/Hacl_Chacha20_Vec256.c
@@ -32,100 +32,100 @@ static inline void double_round_256(Lib_IntVector_Intrinsics_vec256 *st)
 {
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std0 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std0, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std0, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std1 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std1, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std1, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std2 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std2, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std2, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std3 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std3, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std3, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std4 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std4, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std4, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std5 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std5, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std5, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std6 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std6, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std6, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std7 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std7, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std7, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std8 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std8, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std8, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std9 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std9, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std9, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std10 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std10, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std10, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std11 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std11, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std11, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std12 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std12, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std12, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std13 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std13, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std13, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std14 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std14, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std14, 7U);
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std15 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std15, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std15, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std16 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std16, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std16, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std17 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std17, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std17, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std18 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std18, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std18, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std19 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std19, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std19, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std20 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std20, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std20, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std21 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std21, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std21, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std22 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std22, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std22, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std23 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std23, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std23, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std24 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std24, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std24, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std25 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std25, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std25, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std26 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std26, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std26, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std27 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std27, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std27, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std28 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std28, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std28, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std29 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std29, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std29, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std30 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std30, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std30, 7U);
 }
 
 static inline void
@@ -135,8 +135,8 @@ chacha20_core_256(
   uint32_t ctr
 )
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  uint32_t ctr_u32 = (uint32_t)8U * ctr;
+  memcpy(k, ctx, 16U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  uint32_t ctr_u32 = 8U * ctr;
   Lib_IntVector_Intrinsics_vec256 cv = Lib_IntVector_Intrinsics_vec256_load32(ctr_u32);
   k[12U] = Lib_IntVector_Intrinsics_vec256_add32(k[12U], cv);
   double_round_256(k);
@@ -150,9 +150,9 @@ chacha20_core_256(
   double_round_256(k);
   double_round_256(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = k;
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_add32(k[i], ctx[i]);
     os[i] = x;);
@@ -164,51 +164,43 @@ chacha20_init_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k, uint8_t *n,
 {
   uint32_t ctx1[16U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx1;
     uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx1 + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx1[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx1 + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = ctx;
     uint32_t x = ctx1[i];
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_load32(x);
     os[i] = x0;);
   Lib_IntVector_Intrinsics_vec256
-  ctr1 =
-    Lib_IntVector_Intrinsics_vec256_load32s((uint32_t)0U,
-      (uint32_t)1U,
-      (uint32_t)2U,
-      (uint32_t)3U,
-      (uint32_t)4U,
-      (uint32_t)5U,
-      (uint32_t)6U,
-      (uint32_t)7U);
+  ctr1 = Lib_IntVector_Intrinsics_vec256_load32s(0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U);
   Lib_IntVector_Intrinsics_vec256 c12 = ctx[12U];
   ctx[12U] = Lib_IntVector_Intrinsics_vec256_add32(c12, ctr1);
 }
@@ -225,13 +217,13 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[16U] KRML_POST_ALIGN(32) = { 0U };
   chacha20_init_256(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)512U;
-  uint32_t nb = len / (uint32_t)512U;
-  uint32_t rem1 = len % (uint32_t)512U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 512U;
+  uint32_t nb = len / 512U;
+  uint32_t rem1 = len % 512U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)512U;
-    uint8_t *uu____1 = text + i * (uint32_t)512U;
+    uint8_t *uu____0 = out + i * 512U;
+    uint8_t *uu____1 = text + i * 512U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, i);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -459,19 +451,19 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * 32U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)512U;
+    uint8_t *uu____2 = out + nb * 512U;
     uint8_t plain[512U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)512U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 512U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -699,13 +691,13 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * 32U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
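
Note: the 256-bit path works in 512-byte superblocks (eight 64-byte blocks per iteration): nb = len / 512 full superblocks are XORed in place by the vectorized loop, and the rem = len % 512 trailing bytes are copied into a zeroed 512-byte buffer, encrypted as one more superblock, and copied back. For example, len = 1300 gives nb = 2 and rem = 276: 2 * 512 = 1024 bytes are handled in place and the remaining 276 bytes go through the padded tail buffer.
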
@@ -722,13 +714,13 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[16U] KRML_POST_ALIGN(32) = { 0U };
   chacha20_init_256(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)512U;
-  uint32_t nb = len / (uint32_t)512U;
-  uint32_t rem1 = len % (uint32_t)512U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 512U;
+  uint32_t nb = len / 512U;
+  uint32_t rem1 = len % 512U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)512U;
-    uint8_t *uu____1 = cipher + i * (uint32_t)512U;
+    uint8_t *uu____0 = out + i * 512U;
+    uint8_t *uu____1 = cipher + i * 512U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, i);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -956,19 +948,19 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * 32U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)512U;
+    uint8_t *uu____2 = out + nb * 512U;
     uint8_t plain[512U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)512U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 512U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -1196,13 +1188,13 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * 32U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
diff --git a/src/Hacl_Chacha20_Vec32.c b/src/Hacl_Chacha20_Vec32.c
index 2bf4764c..0dce915c 100644
--- a/src/Hacl_Chacha20_Vec32.c
+++ b/src/Hacl_Chacha20_Vec32.c
@@ -31,106 +31,106 @@ static inline void double_round_32(uint32_t *st)
 {
   st[0U] = st[0U] + st[4U];
   uint32_t std = st[12U] ^ st[0U];
-  st[12U] = std << (uint32_t)16U | std >> (uint32_t)16U;
+  st[12U] = std << 16U | std >> 16U;
   st[8U] = st[8U] + st[12U];
   uint32_t std0 = st[4U] ^ st[8U];
-  st[4U] = std0 << (uint32_t)12U | std0 >> (uint32_t)20U;
+  st[4U] = std0 << 12U | std0 >> 20U;
   st[0U] = st[0U] + st[4U];
   uint32_t std1 = st[12U] ^ st[0U];
-  st[12U] = std1 << (uint32_t)8U | std1 >> (uint32_t)24U;
+  st[12U] = std1 << 8U | std1 >> 24U;
   st[8U] = st[8U] + st[12U];
   uint32_t std2 = st[4U] ^ st[8U];
-  st[4U] = std2 << (uint32_t)7U | std2 >> (uint32_t)25U;
+  st[4U] = std2 << 7U | std2 >> 25U;
   st[1U] = st[1U] + st[5U];
   uint32_t std3 = st[13U] ^ st[1U];
-  st[13U] = std3 << (uint32_t)16U | std3 >> (uint32_t)16U;
+  st[13U] = std3 << 16U | std3 >> 16U;
   st[9U] = st[9U] + st[13U];
   uint32_t std4 = st[5U] ^ st[9U];
-  st[5U] = std4 << (uint32_t)12U | std4 >> (uint32_t)20U;
+  st[5U] = std4 << 12U | std4 >> 20U;
   st[1U] = st[1U] + st[5U];
   uint32_t std5 = st[13U] ^ st[1U];
-  st[13U] = std5 << (uint32_t)8U | std5 >> (uint32_t)24U;
+  st[13U] = std5 << 8U | std5 >> 24U;
   st[9U] = st[9U] + st[13U];
   uint32_t std6 = st[5U] ^ st[9U];
-  st[5U] = std6 << (uint32_t)7U | std6 >> (uint32_t)25U;
+  st[5U] = std6 << 7U | std6 >> 25U;
   st[2U] = st[2U] + st[6U];
   uint32_t std7 = st[14U] ^ st[2U];
-  st[14U] = std7 << (uint32_t)16U | std7 >> (uint32_t)16U;
+  st[14U] = std7 << 16U | std7 >> 16U;
   st[10U] = st[10U] + st[14U];
   uint32_t std8 = st[6U] ^ st[10U];
-  st[6U] = std8 << (uint32_t)12U | std8 >> (uint32_t)20U;
+  st[6U] = std8 << 12U | std8 >> 20U;
   st[2U] = st[2U] + st[6U];
   uint32_t std9 = st[14U] ^ st[2U];
-  st[14U] = std9 << (uint32_t)8U | std9 >> (uint32_t)24U;
+  st[14U] = std9 << 8U | std9 >> 24U;
   st[10U] = st[10U] + st[14U];
   uint32_t std10 = st[6U] ^ st[10U];
-  st[6U] = std10 << (uint32_t)7U | std10 >> (uint32_t)25U;
+  st[6U] = std10 << 7U | std10 >> 25U;
   st[3U] = st[3U] + st[7U];
   uint32_t std11 = st[15U] ^ st[3U];
-  st[15U] = std11 << (uint32_t)16U | std11 >> (uint32_t)16U;
+  st[15U] = std11 << 16U | std11 >> 16U;
   st[11U] = st[11U] + st[15U];
   uint32_t std12 = st[7U] ^ st[11U];
-  st[7U] = std12 << (uint32_t)12U | std12 >> (uint32_t)20U;
+  st[7U] = std12 << 12U | std12 >> 20U;
   st[3U] = st[3U] + st[7U];
   uint32_t std13 = st[15U] ^ st[3U];
-  st[15U] = std13 << (uint32_t)8U | std13 >> (uint32_t)24U;
+  st[15U] = std13 << 8U | std13 >> 24U;
   st[11U] = st[11U] + st[15U];
   uint32_t std14 = st[7U] ^ st[11U];
-  st[7U] = std14 << (uint32_t)7U | std14 >> (uint32_t)25U;
+  st[7U] = std14 << 7U | std14 >> 25U;
   st[0U] = st[0U] + st[5U];
   uint32_t std15 = st[15U] ^ st[0U];
-  st[15U] = std15 << (uint32_t)16U | std15 >> (uint32_t)16U;
+  st[15U] = std15 << 16U | std15 >> 16U;
   st[10U] = st[10U] + st[15U];
   uint32_t std16 = st[5U] ^ st[10U];
-  st[5U] = std16 << (uint32_t)12U | std16 >> (uint32_t)20U;
+  st[5U] = std16 << 12U | std16 >> 20U;
   st[0U] = st[0U] + st[5U];
   uint32_t std17 = st[15U] ^ st[0U];
-  st[15U] = std17 << (uint32_t)8U | std17 >> (uint32_t)24U;
+  st[15U] = std17 << 8U | std17 >> 24U;
   st[10U] = st[10U] + st[15U];
   uint32_t std18 = st[5U] ^ st[10U];
-  st[5U] = std18 << (uint32_t)7U | std18 >> (uint32_t)25U;
+  st[5U] = std18 << 7U | std18 >> 25U;
   st[1U] = st[1U] + st[6U];
   uint32_t std19 = st[12U] ^ st[1U];
-  st[12U] = std19 << (uint32_t)16U | std19 >> (uint32_t)16U;
+  st[12U] = std19 << 16U | std19 >> 16U;
   st[11U] = st[11U] + st[12U];
   uint32_t std20 = st[6U] ^ st[11U];
-  st[6U] = std20 << (uint32_t)12U | std20 >> (uint32_t)20U;
+  st[6U] = std20 << 12U | std20 >> 20U;
   st[1U] = st[1U] + st[6U];
   uint32_t std21 = st[12U] ^ st[1U];
-  st[12U] = std21 << (uint32_t)8U | std21 >> (uint32_t)24U;
+  st[12U] = std21 << 8U | std21 >> 24U;
   st[11U] = st[11U] + st[12U];
   uint32_t std22 = st[6U] ^ st[11U];
-  st[6U] = std22 << (uint32_t)7U | std22 >> (uint32_t)25U;
+  st[6U] = std22 << 7U | std22 >> 25U;
   st[2U] = st[2U] + st[7U];
   uint32_t std23 = st[13U] ^ st[2U];
-  st[13U] = std23 << (uint32_t)16U | std23 >> (uint32_t)16U;
+  st[13U] = std23 << 16U | std23 >> 16U;
   st[8U] = st[8U] + st[13U];
   uint32_t std24 = st[7U] ^ st[8U];
-  st[7U] = std24 << (uint32_t)12U | std24 >> (uint32_t)20U;
+  st[7U] = std24 << 12U | std24 >> 20U;
   st[2U] = st[2U] + st[7U];
   uint32_t std25 = st[13U] ^ st[2U];
-  st[13U] = std25 << (uint32_t)8U | std25 >> (uint32_t)24U;
+  st[13U] = std25 << 8U | std25 >> 24U;
   st[8U] = st[8U] + st[13U];
   uint32_t std26 = st[7U] ^ st[8U];
-  st[7U] = std26 << (uint32_t)7U | std26 >> (uint32_t)25U;
+  st[7U] = std26 << 7U | std26 >> 25U;
   st[3U] = st[3U] + st[4U];
   uint32_t std27 = st[14U] ^ st[3U];
-  st[14U] = std27 << (uint32_t)16U | std27 >> (uint32_t)16U;
+  st[14U] = std27 << 16U | std27 >> 16U;
   st[9U] = st[9U] + st[14U];
   uint32_t std28 = st[4U] ^ st[9U];
-  st[4U] = std28 << (uint32_t)12U | std28 >> (uint32_t)20U;
+  st[4U] = std28 << 12U | std28 >> 20U;
   st[3U] = st[3U] + st[4U];
   uint32_t std29 = st[14U] ^ st[3U];
-  st[14U] = std29 << (uint32_t)8U | std29 >> (uint32_t)24U;
+  st[14U] = std29 << 8U | std29 >> 24U;
   st[9U] = st[9U] + st[14U];
   uint32_t std30 = st[4U] ^ st[9U];
-  st[4U] = std30 << (uint32_t)7U | std30 >> (uint32_t)25U;
+  st[4U] = std30 << 7U | std30 >> 25U;
 }
 
 static inline void chacha20_core_32(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t));
-  uint32_t ctr_u32 = (uint32_t)1U * ctr;
+  memcpy(k, ctx, 16U * sizeof (uint32_t));
+  uint32_t ctr_u32 = 1U * ctr;
   uint32_t cv = ctr_u32;
   k[12U] = k[12U] + cv;
   double_round_32(k);
@@ -144,9 +144,9 @@ static inline void chacha20_core_32(uint32_t *k, uint32_t *ctx, uint32_t ctr)
   double_round_32(k);
   double_round_32(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = k;
     uint32_t x = k[i] + ctx[i];
     os[i] = x;);
@@ -157,41 +157,41 @@ static inline void chacha20_init_32(uint32_t *ctx, uint8_t *k, uint8_t *n, uint3
 {
   uint32_t ctx1[16U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx1;
     uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx1 + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx1[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx1 + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = ctx;
     uint32_t x = ctx1[i];
     os[i] = x;);
-  uint32_t ctr1 = (uint32_t)0U;
+  uint32_t ctr1 = 0U;
   uint32_t c12 = ctx[12U];
   ctx[12U] = c12 + ctr1;
 }
@@ -208,39 +208,39 @@ Hacl_Chacha20_Vec32_chacha20_encrypt_32(
 {
   uint32_t ctx[16U] = { 0U };
   chacha20_init_32(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = text + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = text + i0 * 64U;
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, i0);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(uu____1 + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(uu____1 + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(uu____0 + i * (uint32_t)4U, y););
+      store32_le(uu____0 + i * 4U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, nb);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(plain + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(plain + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(plain + i * (uint32_t)4U, y););
+      store32_le(plain + i * 4U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -257,39 +257,39 @@ Hacl_Chacha20_Vec32_chacha20_decrypt_32(
 {
   uint32_t ctx[16U] = { 0U };
   chacha20_init_32(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = cipher + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = cipher + i0 * 64U;
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, i0);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(uu____1 + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(uu____1 + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(uu____0 + i * (uint32_t)4U, y););
+      store32_le(uu____0 + i * 4U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, nb);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(plain + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(plain + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(plain + i * (uint32_t)4U, y););
+      store32_le(plain + i * 4U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
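
Note: a hedged usage sketch for the portable variant above. It assumes the prototype is (len, out, input, key, nonce, ctr) as suggested by the function bodies, a 32-byte key, a 12-byte nonce and an initial block counter of 0 (the IETF construction). Decryption applies the same keystream XOR, so the round trip must return the original bytes.

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include "Hacl_Chacha20_Vec32.h"

int main(void)
{
  uint8_t key[32U] = { 0U };     /* test material only, never a real key */
  uint8_t nonce[12U] = { 0U };
  uint8_t msg[100U], ct[100U], pt[100U];
  for (uint32_t i = 0U; i < 100U; i++) { msg[i] = (uint8_t)i; }
  Hacl_Chacha20_Vec32_chacha20_encrypt_32(100U, ct, msg, key, nonce, 0U);
  Hacl_Chacha20_Vec32_chacha20_decrypt_32(100U, pt, ct, key, nonce, 0U);
  assert(memcmp(pt, msg, 100U) == 0);   /* stream cipher round trip */
  return 0;
}
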
diff --git a/src/Hacl_Curve25519_51.c b/src/Hacl_Curve25519_51.c
index 64c855cf..ca561e89 100644
--- a/src/Hacl_Curve25519_51.c
+++ b/src/Hacl_Curve25519_51.c
@@ -28,38 +28,38 @@
 #include "internal/Hacl_Krmllib.h"
 #include "internal/Hacl_Bignum25519_51.h"
 
-static const uint8_t g25519[32U] = { (uint8_t)9U };
+static const uint8_t g25519[32U] = { 9U };
 
 static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_uint128 *tmp2)
 {
   uint64_t *nq = p01_tmp1;
-  uint64_t *nq_p1 = p01_tmp1 + (uint32_t)10U;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U;
+  uint64_t *nq_p1 = p01_tmp1 + 10U;
+  uint64_t *tmp1 = p01_tmp1 + 20U;
   uint64_t *x1 = q;
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)5U;
-  uint64_t *z3 = nq_p1 + (uint32_t)5U;
+  uint64_t *z2 = nq + 5U;
+  uint64_t *z3 = nq_p1 + 5U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)5U;
+  uint64_t *b = tmp1 + 5U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)10U;
+  uint64_t *dc = tmp1 + 10U;
   Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
   Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
   uint64_t *x3 = nq_p1;
-  uint64_t *z31 = nq_p1 + (uint32_t)5U;
+  uint64_t *z31 = nq_p1 + 5U;
   uint64_t *d0 = dc;
-  uint64_t *c0 = dc + (uint32_t)5U;
+  uint64_t *c0 = dc + 5U;
   Hacl_Impl_Curve25519_Field51_fadd(c0, x3, z31);
   Hacl_Impl_Curve25519_Field51_fsub(d0, x3, z31);
   Hacl_Impl_Curve25519_Field51_fmul2(dc, dc, ab, tmp2);
   Hacl_Impl_Curve25519_Field51_fadd(x3, d0, c0);
   Hacl_Impl_Curve25519_Field51_fsub(z31, d0, c0);
   uint64_t *a1 = tmp1;
-  uint64_t *b1 = tmp1 + (uint32_t)5U;
-  uint64_t *d = tmp1 + (uint32_t)10U;
-  uint64_t *c = tmp1 + (uint32_t)15U;
+  uint64_t *b1 = tmp1 + 5U;
+  uint64_t *d = tmp1 + 10U;
+  uint64_t *c = tmp1 + 15U;
   uint64_t *ab1 = tmp1;
-  uint64_t *dc1 = tmp1 + (uint32_t)10U;
+  uint64_t *dc1 = tmp1 + 10U;
   Hacl_Impl_Curve25519_Field51_fsqr2(dc1, ab1, tmp2);
   Hacl_Impl_Curve25519_Field51_fsqr2(nq_p1, nq_p1, tmp2);
   a1[0U] = c[0U];
@@ -68,7 +68,7 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_
   a1[3U] = c[3U];
   a1[4U] = c[4U];
   Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
-  Hacl_Impl_Curve25519_Field51_fmul1(b1, c, (uint64_t)121665U);
+  Hacl_Impl_Curve25519_Field51_fmul1(b1, c, 121665ULL);
   Hacl_Impl_Curve25519_Field51_fadd(b1, b1, d);
   Hacl_Impl_Curve25519_Field51_fmul2(nq, dc1, ab1, tmp2);
   Hacl_Impl_Curve25519_Field51_fmul(z3, z3, x1, tmp2);
@@ -77,13 +77,13 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_
 static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tmp2)
 {
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)5U;
+  uint64_t *z2 = nq + 5U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)5U;
-  uint64_t *d = tmp1 + (uint32_t)10U;
-  uint64_t *c = tmp1 + (uint32_t)15U;
+  uint64_t *b = tmp1 + 5U;
+  uint64_t *d = tmp1 + 10U;
+  uint64_t *c = tmp1 + 15U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)10U;
+  uint64_t *dc = tmp1 + 10U;
   Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
   Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
   Hacl_Impl_Curve25519_Field51_fsqr2(dc, ab, tmp2);
@@ -93,7 +93,7 @@ static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tm
   a[3U] = c[3U];
   a[4U] = c[4U];
   Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
-  Hacl_Impl_Curve25519_Field51_fmul1(b, c, (uint64_t)121665U);
+  Hacl_Impl_Curve25519_Field51_fmul1(b, c, 121665ULL);
   Hacl_Impl_Curve25519_Field51_fadd(b, b, d);
   Hacl_Impl_Curve25519_Field51_fmul2(nq, dc, ab, tmp2);
 }
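
Note: point_add_and_double and point_double implement the X-only Montgomery ladder step of RFC 7748 in projective (X : Z) coordinates. For the doubling half, with A = x2 + z2, B = x2 - z2, AA = A^2, BB = B^2 and E = AA - BB, the code computes

  x2' = AA * BB
  z2' = E * (AA + a24 * E),   where a24 = (486662 - 2) / 4 = 121665

which is where the 121665 constant passed to fmul1 comes from (486662 is the Montgomery coefficient of Curve25519). The a1[0U..4U] = c[0U..4U] copies and the subsequent fsub/fadd simply arrange AA, BB, E and a24*E + AA into the ab/dc buffers so that a single fmul2 produces both products.
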
@@ -101,46 +101,41 @@ static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tm
 static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
 {
   FStar_UInt128_uint128 tmp2[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp2[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp2[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   uint64_t p01_tmp1_swap[41U] = { 0U };
   uint64_t *p0 = p01_tmp1_swap;
   uint64_t *p01 = p01_tmp1_swap;
   uint64_t *p03 = p01;
-  uint64_t *p11 = p01 + (uint32_t)10U;
-  memcpy(p11, init, (uint32_t)10U * sizeof (uint64_t));
+  uint64_t *p11 = p01 + 10U;
+  memcpy(p11, init, 10U * sizeof (uint64_t));
   uint64_t *x0 = p03;
-  uint64_t *z0 = p03 + (uint32_t)5U;
-  x0[0U] = (uint64_t)1U;
-  x0[1U] = (uint64_t)0U;
-  x0[2U] = (uint64_t)0U;
-  x0[3U] = (uint64_t)0U;
-  x0[4U] = (uint64_t)0U;
-  z0[0U] = (uint64_t)0U;
-  z0[1U] = (uint64_t)0U;
-  z0[2U] = (uint64_t)0U;
-  z0[3U] = (uint64_t)0U;
-  z0[4U] = (uint64_t)0U;
+  uint64_t *z0 = p03 + 5U;
+  x0[0U] = 1ULL;
+  x0[1U] = 0ULL;
+  x0[2U] = 0ULL;
+  x0[3U] = 0ULL;
+  x0[4U] = 0ULL;
+  z0[0U] = 0ULL;
+  z0[1U] = 0ULL;
+  z0[2U] = 0ULL;
+  z0[3U] = 0ULL;
+  z0[4U] = 0ULL;
   uint64_t *p01_tmp1 = p01_tmp1_swap;
   uint64_t *p01_tmp11 = p01_tmp1_swap;
   uint64_t *nq1 = p01_tmp1_swap;
-  uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)10U;
-  uint64_t *swap = p01_tmp1_swap + (uint32_t)40U;
-  Hacl_Impl_Curve25519_Field51_cswap2((uint64_t)1U, nq1, nq_p11);
+  uint64_t *nq_p11 = p01_tmp1_swap + 10U;
+  uint64_t *swap = p01_tmp1_swap + 40U;
+  Hacl_Impl_Curve25519_Field51_cswap2(1ULL, nq1, nq_p11);
   point_add_and_double(init, p01_tmp11, tmp2);
-  swap[0U] = (uint64_t)1U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i++)
+  swap[0U] = 1ULL;
+  for (uint32_t i = 0U; i < 251U; i++)
   {
     uint64_t *p01_tmp12 = p01_tmp1_swap;
-    uint64_t *swap1 = p01_tmp1_swap + (uint32_t)40U;
+    uint64_t *swap1 = p01_tmp1_swap + 40U;
     uint64_t *nq2 = p01_tmp12;
-    uint64_t *nq_p12 = p01_tmp12 + (uint32_t)10U;
-    uint64_t
-    bit =
-      (uint64_t)(key[((uint32_t)253U - i)
-      / (uint32_t)8U]
-      >> ((uint32_t)253U - i) % (uint32_t)8U
-      & (uint8_t)1U);
+    uint64_t *nq_p12 = p01_tmp12 + 10U;
+    uint64_t bit = (uint64_t)((uint32_t)key[(253U - i) / 8U] >> (253U - i) % 8U & 1U);
     uint64_t sw = swap1[0U] ^ bit;
     Hacl_Impl_Curve25519_Field51_cswap2(sw, nq2, nq_p12);
     point_add_and_double(init, p01_tmp12, tmp2);
@@ -149,11 +144,11 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
   uint64_t sw = swap[0U];
   Hacl_Impl_Curve25519_Field51_cswap2(sw, nq1, nq_p11);
   uint64_t *nq10 = p01_tmp1;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U;
+  uint64_t *tmp1 = p01_tmp1 + 20U;
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
-  memcpy(out, p0, (uint32_t)10U * sizeof (uint64_t));
+  memcpy(out, p0, 10U * sizeof (uint64_t));
 }
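
Note: the ladder scans the scalar from the top down in constant time. The initial cswap2(1ULL, ...) and ladder step correspond to bit 254, which RFC 7748 clamping forces to 1; the loop then consumes bit (253 - i) for i = 0..250, i.e. bits 253 down to 3; the three unconditional point_double calls at the end account for bits 2, 1 and 0, which clamping zeroes (multiplication by the cofactor 8); bit 255 is never read. Only the conditional swap depends on scalar bits, never a branch. The bit extraction, written as a standalone helper (illustrative name):

#include <stdint.h>

/* bit `pos` of the little-endian scalar, matching
   key[(253U - i) / 8U] >> (253U - i) % 8U & 1U in the loop above */
static inline uint64_t scalar_bit(const uint8_t *key, uint32_t pos)
{
  return (uint64_t)((uint32_t)key[pos / 8U] >> pos % 8U & 1U);
}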
 
 void
@@ -165,7 +160,7 @@ Hacl_Curve25519_51_fsquare_times(
 )
 {
   Hacl_Impl_Curve25519_Field51_fsqr(o, inp, tmp);
-  for (uint32_t i = (uint32_t)0U; i < n - (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < n - 1U; i++)
   {
     Hacl_Impl_Curve25519_Field51_fsqr(o, o, tmp);
   }
@@ -175,60 +170,56 @@ void Hacl_Curve25519_51_finv(uint64_t *o, uint64_t *i, FStar_UInt128_uint128 *tm
 {
   uint64_t t1[20U] = { 0U };
   uint64_t *a1 = t1;
-  uint64_t *b1 = t1 + (uint32_t)5U;
-  uint64_t *t010 = t1 + (uint32_t)15U;
+  uint64_t *b1 = t1 + 5U;
+  uint64_t *t010 = t1 + 15U;
   FStar_UInt128_uint128 *tmp10 = tmp;
-  Hacl_Curve25519_51_fsquare_times(a1, i, tmp10, (uint32_t)1U);
-  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, (uint32_t)2U);
+  Hacl_Curve25519_51_fsquare_times(a1, i, tmp10, 1U);
+  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, 2U);
   Hacl_Impl_Curve25519_Field51_fmul(b1, t010, i, tmp);
   Hacl_Impl_Curve25519_Field51_fmul(a1, b1, a1, tmp);
-  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, (uint32_t)1U);
+  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, 1U);
   Hacl_Impl_Curve25519_Field51_fmul(b1, t010, b1, tmp);
-  Hacl_Curve25519_51_fsquare_times(t010, b1, tmp10, (uint32_t)5U);
+  Hacl_Curve25519_51_fsquare_times(t010, b1, tmp10, 5U);
   Hacl_Impl_Curve25519_Field51_fmul(b1, t010, b1, tmp);
-  uint64_t *b10 = t1 + (uint32_t)5U;
-  uint64_t *c10 = t1 + (uint32_t)10U;
-  uint64_t *t011 = t1 + (uint32_t)15U;
+  uint64_t *b10 = t1 + 5U;
+  uint64_t *c10 = t1 + 10U;
+  uint64_t *t011 = t1 + 15U;
   FStar_UInt128_uint128 *tmp11 = tmp;
-  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, (uint32_t)10U);
+  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, 10U);
   Hacl_Impl_Curve25519_Field51_fmul(c10, t011, b10, tmp);
-  Hacl_Curve25519_51_fsquare_times(t011, c10, tmp11, (uint32_t)20U);
+  Hacl_Curve25519_51_fsquare_times(t011, c10, tmp11, 20U);
   Hacl_Impl_Curve25519_Field51_fmul(t011, t011, c10, tmp);
-  Hacl_Curve25519_51_fsquare_times(t011, t011, tmp11, (uint32_t)10U);
+  Hacl_Curve25519_51_fsquare_times(t011, t011, tmp11, 10U);
   Hacl_Impl_Curve25519_Field51_fmul(b10, t011, b10, tmp);
-  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, (uint32_t)50U);
+  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, 50U);
   Hacl_Impl_Curve25519_Field51_fmul(c10, t011, b10, tmp);
-  uint64_t *b11 = t1 + (uint32_t)5U;
-  uint64_t *c1 = t1 + (uint32_t)10U;
-  uint64_t *t01 = t1 + (uint32_t)15U;
+  uint64_t *b11 = t1 + 5U;
+  uint64_t *c1 = t1 + 10U;
+  uint64_t *t01 = t1 + 15U;
   FStar_UInt128_uint128 *tmp1 = tmp;
-  Hacl_Curve25519_51_fsquare_times(t01, c1, tmp1, (uint32_t)100U);
+  Hacl_Curve25519_51_fsquare_times(t01, c1, tmp1, 100U);
   Hacl_Impl_Curve25519_Field51_fmul(t01, t01, c1, tmp);
-  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, (uint32_t)50U);
+  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, 50U);
   Hacl_Impl_Curve25519_Field51_fmul(t01, t01, b11, tmp);
-  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, (uint32_t)5U);
+  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, 5U);
   uint64_t *a = t1;
-  uint64_t *t0 = t1 + (uint32_t)15U;
+  uint64_t *t0 = t1 + 15U;
   Hacl_Impl_Curve25519_Field51_fmul(o, t0, a, tmp);
 }
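
Note: Hacl_Curve25519_51_finv is Fermat inversion, raising its input to p - 2 for p = 2^255 - 19 with a fixed addition chain. The fsquare_times calls perform 1 + 2 + 1 + 5 + 10 + 20 + 10 + 50 + 100 + 50 + 5 = 254 squarings, and there are 11 field multiplications, independent of the input value, so the inversion runs in constant time.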
 
 static void encode_point(uint8_t *o, uint64_t *i)
 {
   uint64_t *x = i;
-  uint64_t *z = i + (uint32_t)5U;
+  uint64_t *z = i + 5U;
   uint64_t tmp[5U] = { 0U };
   uint64_t u64s[4U] = { 0U };
   FStar_UInt128_uint128 tmp_w[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp_w[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp_w[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_finv(tmp, z, tmp_w);
   Hacl_Impl_Curve25519_Field51_fmul(tmp, tmp, x, tmp_w);
   Hacl_Impl_Curve25519_Field51_store_felem(u64s, tmp);
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(o + i0 * (uint32_t)8U, u64s[i0]););
+  KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, store64_le(o + i0 * 8U, u64s[i0]););
 }
 
 /**
@@ -243,32 +234,32 @@ void Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
   uint64_t init[10U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = tmp;
-    uint8_t *bj = pub + i * (uint32_t)8U;
+    uint8_t *bj = pub + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t tmp3 = tmp[3U];
-  tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU;
+  tmp[3U] = tmp3 & 0x7fffffffffffffffULL;
   uint64_t *x = init;
-  uint64_t *z = init + (uint32_t)5U;
-  z[0U] = (uint64_t)1U;
-  z[1U] = (uint64_t)0U;
-  z[2U] = (uint64_t)0U;
-  z[3U] = (uint64_t)0U;
-  z[4U] = (uint64_t)0U;
-  uint64_t f0l = tmp[0U] & (uint64_t)0x7ffffffffffffU;
-  uint64_t f0h = tmp[0U] >> (uint32_t)51U;
-  uint64_t f1l = (tmp[1U] & (uint64_t)0x3fffffffffU) << (uint32_t)13U;
-  uint64_t f1h = tmp[1U] >> (uint32_t)38U;
-  uint64_t f2l = (tmp[2U] & (uint64_t)0x1ffffffU) << (uint32_t)26U;
-  uint64_t f2h = tmp[2U] >> (uint32_t)25U;
-  uint64_t f3l = (tmp[3U] & (uint64_t)0xfffU) << (uint32_t)39U;
-  uint64_t f3h = tmp[3U] >> (uint32_t)12U;
+  uint64_t *z = init + 5U;
+  z[0U] = 1ULL;
+  z[1U] = 0ULL;
+  z[2U] = 0ULL;
+  z[3U] = 0ULL;
+  z[4U] = 0ULL;
+  uint64_t f0l = tmp[0U] & 0x7ffffffffffffULL;
+  uint64_t f0h = tmp[0U] >> 51U;
+  uint64_t f1l = (tmp[1U] & 0x3fffffffffULL) << 13U;
+  uint64_t f1h = tmp[1U] >> 38U;
+  uint64_t f2l = (tmp[2U] & 0x1ffffffULL) << 26U;
+  uint64_t f2h = tmp[2U] >> 25U;
+  uint64_t f3l = (tmp[3U] & 0xfffULL) << 39U;
+  uint64_t f3h = tmp[3U] >> 12U;
   x[0U] = f0l;
   x[1U] = f0h | f1l;
   x[2U] = f1h | f2l;
@@ -289,7 +280,7 @@ This computes a scalar multiplication of the secret/private key with the curve's
 void Hacl_Curve25519_51_secret_to_public(uint8_t *pub, uint8_t *priv)
 {
   uint8_t basepoint[32U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = basepoint;
     uint8_t x = g25519[i];
@@ -309,14 +300,14 @@ bool Hacl_Curve25519_51_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
 {
   uint8_t zeros[32U] = { 0U };
   Hacl_Curve25519_51_scalarmult(out, priv, pub);
-  uint8_t res = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint8_t res = 255U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint8_t z = res;
-  bool r = z == (uint8_t)255U;
+  bool r = z == 255U;
   return !r;
 }
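
Note: a hedged usage sketch for the two entry points above, using the signatures visible in this file. The private keys below are placeholders; real keys must come from a cryptographically secure RNG. Hacl_Curve25519_51_ecdh returns false when the shared secret is all zeroes (the check against low-order inputs shown above), which callers must treat as failure.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "Hacl_Curve25519_51.h"

int main(void)
{
  uint8_t alice_priv[32U] = { 1U };   /* placeholder, NOT a real key */
  uint8_t bob_priv[32U]   = { 2U };   /* placeholder, NOT a real key */
  uint8_t alice_pub[32U], bob_pub[32U], s1[32U], s2[32U];
  Hacl_Curve25519_51_secret_to_public(alice_pub, alice_priv);
  Hacl_Curve25519_51_secret_to_public(bob_pub, bob_priv);
  bool ok1 = Hacl_Curve25519_51_ecdh(s1, alice_priv, bob_pub);
  bool ok2 = Hacl_Curve25519_51_ecdh(s2, bob_priv, alice_pub);
  if (!ok1 || !ok2) { return 1; }     /* all-zero result: reject */
  /* s1 and s2 now hold the same 32-byte shared secret */
  return memcmp(s1, s2, 32U) == 0 ? 0 : 1;
}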
 
diff --git a/src/Hacl_Curve25519_64.c b/src/Hacl_Curve25519_64.c
index fb0974fe..edcab306 100644
--- a/src/Hacl_Curve25519_64.c
+++ b/src/Hacl_Curve25519_64.c
@@ -35,7 +35,7 @@ static inline void add_scalar0(uint64_t *out, uint64_t *f1, uint64_t f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   add_scalar(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(add_scalar_e(out, f1, f2));
+  add_scalar_e(out, f1, f2);
   #endif
 }
 
@@ -44,7 +44,7 @@ static inline void fadd0(uint64_t *out, uint64_t *f1, uint64_t *f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fadd(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(fadd_e(out, f1, f2));
+  fadd_e(out, f1, f2);
   #endif
 }
 
@@ -53,7 +53,7 @@ static inline void fsub0(uint64_t *out, uint64_t *f1, uint64_t *f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fsub(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(fsub_e(out, f1, f2));
+  fsub_e(out, f1, f2);
   #endif
 }
 
@@ -62,7 +62,7 @@ static inline void fmul0(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *tm
   #if HACL_CAN_COMPILE_INLINE_ASM
   fmul(out, f1, f2, tmp);
   #else
-  KRML_HOST_IGNORE(fmul_e(tmp, f1, out, f2));
+  fmul_e(tmp, f1, out, f2);
   #endif
 }
 
@@ -71,7 +71,7 @@ static inline void fmul20(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *t
   #if HACL_CAN_COMPILE_INLINE_ASM
   fmul2(out, f1, f2, tmp);
   #else
-  KRML_HOST_IGNORE(fmul2_e(tmp, f1, out, f2));
+  fmul2_e(tmp, f1, out, f2);
   #endif
 }
 
@@ -80,7 +80,7 @@ static inline void fmul_scalar0(uint64_t *out, uint64_t *f1, uint64_t f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fmul_scalar(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(fmul_scalar_e(out, f1, f2));
+  fmul_scalar_e(out, f1, f2);
   #endif
 }
 
@@ -89,7 +89,7 @@ static inline void fsqr0(uint64_t *out, uint64_t *f1, uint64_t *tmp)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fsqr(out, f1, tmp);
   #else
-  KRML_HOST_IGNORE(fsqr_e(tmp, f1, out));
+  fsqr_e(tmp, f1, out);
   #endif
 }
 
@@ -98,7 +98,7 @@ static inline void fsqr20(uint64_t *out, uint64_t *f, uint64_t *tmp)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fsqr2(out, f, tmp);
   #else
-  KRML_HOST_IGNORE(fsqr2_e(tmp, f, out));
+  fsqr2_e(tmp, f, out);
   #endif
 }
 
@@ -107,42 +107,42 @@ static inline void cswap20(uint64_t bit, uint64_t *p1, uint64_t *p2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   cswap2(bit, p1, p2);
   #else
-  KRML_HOST_IGNORE(cswap2_e(bit, p1, p2));
+  cswap2_e(bit, p1, p2);
   #endif
 }
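
Note: Hacl_Curve25519_64 runs the same X25519 ladder as Hacl_Curve25519_51 but over a saturated radix-2^64 field representation (4 limbs instead of 5, hence buffer offsets 4/8/16/32 below instead of 5/10/20/40), with the field arithmetic supplied either by Vale inline assembly or, when HACL_CAN_COMPILE_INLINE_ASM is unavailable, by the externally linked *_e fallbacks that the wrappers above now call directly. For contrast, this is how the radix-2^51 code splits four little-endian 64-bit words into five 51-bit limbs, mirroring the f0l/f0h/... assignments in Hacl_Curve25519_51_scalarmult (illustrative helper, not library code):

#include <stdint.h>

static void to_felem51(uint64_t out[5U], const uint64_t in[4U])
{
  out[0U] = in[0U] & 0x7ffffffffffffULL;                         /* bits   0..50  */
  out[1U] = in[0U] >> 51U | (in[1U] & 0x3fffffffffULL) << 13U;   /* bits  51..101 */
  out[2U] = in[1U] >> 38U | (in[2U] & 0x1ffffffULL) << 26U;      /* bits 102..152 */
  out[3U] = in[2U] >> 25U | (in[3U] & 0xfffULL) << 39U;          /* bits 153..203 */
  out[4U] = in[3U] >> 12U;                                       /* bits 204..254 */
}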
 
-static const uint8_t g25519[32U] = { (uint8_t)9U };
+static const uint8_t g25519[32U] = { 9U };
 
 static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2)
 {
   uint64_t *nq = p01_tmp1;
-  uint64_t *nq_p1 = p01_tmp1 + (uint32_t)8U;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U;
+  uint64_t *nq_p1 = p01_tmp1 + 8U;
+  uint64_t *tmp1 = p01_tmp1 + 16U;
   uint64_t *x1 = q;
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)4U;
-  uint64_t *z3 = nq_p1 + (uint32_t)4U;
+  uint64_t *z2 = nq + 4U;
+  uint64_t *z3 = nq_p1 + 4U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)4U;
+  uint64_t *b = tmp1 + 4U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)8U;
+  uint64_t *dc = tmp1 + 8U;
   fadd0(a, x2, z2);
   fsub0(b, x2, z2);
   uint64_t *x3 = nq_p1;
-  uint64_t *z31 = nq_p1 + (uint32_t)4U;
+  uint64_t *z31 = nq_p1 + 4U;
   uint64_t *d0 = dc;
-  uint64_t *c0 = dc + (uint32_t)4U;
+  uint64_t *c0 = dc + 4U;
   fadd0(c0, x3, z31);
   fsub0(d0, x3, z31);
   fmul20(dc, dc, ab, tmp2);
   fadd0(x3, d0, c0);
   fsub0(z31, d0, c0);
   uint64_t *a1 = tmp1;
-  uint64_t *b1 = tmp1 + (uint32_t)4U;
-  uint64_t *d = tmp1 + (uint32_t)8U;
-  uint64_t *c = tmp1 + (uint32_t)12U;
+  uint64_t *b1 = tmp1 + 4U;
+  uint64_t *d = tmp1 + 8U;
+  uint64_t *c = tmp1 + 12U;
   uint64_t *ab1 = tmp1;
-  uint64_t *dc1 = tmp1 + (uint32_t)8U;
+  uint64_t *dc1 = tmp1 + 8U;
   fsqr20(dc1, ab1, tmp2);
   fsqr20(nq_p1, nq_p1, tmp2);
   a1[0U] = c[0U];
@@ -150,7 +150,7 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2
   a1[2U] = c[2U];
   a1[3U] = c[3U];
   fsub0(c, d, c);
-  fmul_scalar0(b1, c, (uint64_t)121665U);
+  fmul_scalar0(b1, c, 121665ULL);
   fadd0(b1, b1, d);
   fmul20(nq, dc1, ab1, tmp2);
   fmul0(z3, z3, x1, tmp2);
@@ -159,13 +159,13 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2
 static void point_double(uint64_t *nq, uint64_t *tmp1, uint64_t *tmp2)
 {
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)4U;
+  uint64_t *z2 = nq + 4U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)4U;
-  uint64_t *d = tmp1 + (uint32_t)8U;
-  uint64_t *c = tmp1 + (uint32_t)12U;
+  uint64_t *b = tmp1 + 4U;
+  uint64_t *d = tmp1 + 8U;
+  uint64_t *c = tmp1 + 12U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)8U;
+  uint64_t *dc = tmp1 + 8U;
   fadd0(a, x2, z2);
   fsub0(b, x2, z2);
   fsqr20(dc, ab, tmp2);
@@ -174,7 +174,7 @@ static void point_double(uint64_t *nq, uint64_t *tmp1, uint64_t *tmp2)
   a[2U] = c[2U];
   a[3U] = c[3U];
   fsub0(c, d, c);
-  fmul_scalar0(b, c, (uint64_t)121665U);
+  fmul_scalar0(b, c, 121665ULL);
   fadd0(b, b, d);
   fmul20(nq, dc, ab, tmp2);
 }
@@ -186,38 +186,33 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
   uint64_t *p0 = p01_tmp1_swap;
   uint64_t *p01 = p01_tmp1_swap;
   uint64_t *p03 = p01;
-  uint64_t *p11 = p01 + (uint32_t)8U;
-  memcpy(p11, init, (uint32_t)8U * sizeof (uint64_t));
+  uint64_t *p11 = p01 + 8U;
+  memcpy(p11, init, 8U * sizeof (uint64_t));
   uint64_t *x0 = p03;
-  uint64_t *z0 = p03 + (uint32_t)4U;
-  x0[0U] = (uint64_t)1U;
-  x0[1U] = (uint64_t)0U;
-  x0[2U] = (uint64_t)0U;
-  x0[3U] = (uint64_t)0U;
-  z0[0U] = (uint64_t)0U;
-  z0[1U] = (uint64_t)0U;
-  z0[2U] = (uint64_t)0U;
-  z0[3U] = (uint64_t)0U;
+  uint64_t *z0 = p03 + 4U;
+  x0[0U] = 1ULL;
+  x0[1U] = 0ULL;
+  x0[2U] = 0ULL;
+  x0[3U] = 0ULL;
+  z0[0U] = 0ULL;
+  z0[1U] = 0ULL;
+  z0[2U] = 0ULL;
+  z0[3U] = 0ULL;
   uint64_t *p01_tmp1 = p01_tmp1_swap;
   uint64_t *p01_tmp11 = p01_tmp1_swap;
   uint64_t *nq1 = p01_tmp1_swap;
-  uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)8U;
-  uint64_t *swap = p01_tmp1_swap + (uint32_t)32U;
-  cswap20((uint64_t)1U, nq1, nq_p11);
+  uint64_t *nq_p11 = p01_tmp1_swap + 8U;
+  uint64_t *swap = p01_tmp1_swap + 32U;
+  cswap20(1ULL, nq1, nq_p11);
   point_add_and_double(init, p01_tmp11, tmp2);
-  swap[0U] = (uint64_t)1U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i++)
+  swap[0U] = 1ULL;
+  for (uint32_t i = 0U; i < 251U; i++)
   {
     uint64_t *p01_tmp12 = p01_tmp1_swap;
-    uint64_t *swap1 = p01_tmp1_swap + (uint32_t)32U;
+    uint64_t *swap1 = p01_tmp1_swap + 32U;
     uint64_t *nq2 = p01_tmp12;
-    uint64_t *nq_p12 = p01_tmp12 + (uint32_t)8U;
-    uint64_t
-    bit =
-      (uint64_t)(key[((uint32_t)253U - i)
-      / (uint32_t)8U]
-      >> ((uint32_t)253U - i) % (uint32_t)8U
-      & (uint8_t)1U);
+    uint64_t *nq_p12 = p01_tmp12 + 8U;
+    uint64_t bit = (uint64_t)((uint32_t)key[(253U - i) / 8U] >> (253U - i) % 8U & 1U);
     uint64_t sw = swap1[0U] ^ bit;
     cswap20(sw, nq2, nq_p12);
     point_add_and_double(init, p01_tmp12, tmp2);
@@ -226,17 +221,17 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
   uint64_t sw = swap[0U];
   cswap20(sw, nq1, nq_p11);
   uint64_t *nq10 = p01_tmp1;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U;
+  uint64_t *tmp1 = p01_tmp1 + 16U;
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
-  memcpy(out, p0, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(out, p0, 8U * sizeof (uint64_t));
 }
 
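[Editorial note] In the ladder above, the rewritten `bit` expression still selects bit 253 - i of the little-endian scalar, and `cswap20` is driven by `swap1[0U] ^ bit`, so limbs are exchanged only when the stored swap flag and the current key bit differ. The sketch below illustrates that pattern; the mask-based swap is an assumption about how such a constant-time swap is typically written, since the body of `cswap20` does not appear in this patch.

    #include <stdint.h>

    /* Illustrative only: select bit k of a little-endian byte string,
     * matching the rewritten expression key[k / 8U] >> (k % 8U) & 1U. */
    static uint64_t scalar_bit(const uint8_t *key, uint32_t k)
    {
      return (uint64_t)(((uint32_t)key[k / 8U] >> (k % 8U)) & 1U);
    }

    /* Hypothetical mask-based swap of two 8-limb points; bit must be 0 or 1.
     * When bit == 1 the mask is all ones and every limb pair is exchanged;
     * when bit == 0 the XORs are no-ops. Treat this as a sketch of the
     * technique, not the library's cswap20. */
    static void cswap_sketch(uint64_t bit, uint64_t *p1, uint64_t *p2)
    {
      uint64_t mask = 0ULL - bit;
      for (uint32_t i = 0U; i < 8U; i++)
      {
        uint64_t dummy = mask & (p1[i] ^ p2[i]);
        p1[i] = p1[i] ^ dummy;
        p2[i] = p2[i] ^ dummy;
      }
    }

Keeping the swap decision in a mask rather than a branch is what makes the ladder's control flow independent of the secret scalar.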
 static void fsquare_times(uint64_t *o, uint64_t *inp, uint64_t *tmp, uint32_t n)
 {
   fsqr0(o, inp, tmp);
-  for (uint32_t i = (uint32_t)0U; i < n - (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < n - 1U; i++)
   {
     fsqr0(o, o, tmp);
   }
@@ -246,66 +241,66 @@ static void finv(uint64_t *o, uint64_t *i, uint64_t *tmp)
 {
   uint64_t t1[16U] = { 0U };
   uint64_t *a1 = t1;
-  uint64_t *b1 = t1 + (uint32_t)4U;
-  uint64_t *t010 = t1 + (uint32_t)12U;
+  uint64_t *b1 = t1 + 4U;
+  uint64_t *t010 = t1 + 12U;
   uint64_t *tmp10 = tmp;
-  fsquare_times(a1, i, tmp10, (uint32_t)1U);
-  fsquare_times(t010, a1, tmp10, (uint32_t)2U);
+  fsquare_times(a1, i, tmp10, 1U);
+  fsquare_times(t010, a1, tmp10, 2U);
   fmul0(b1, t010, i, tmp);
   fmul0(a1, b1, a1, tmp);
-  fsquare_times(t010, a1, tmp10, (uint32_t)1U);
+  fsquare_times(t010, a1, tmp10, 1U);
   fmul0(b1, t010, b1, tmp);
-  fsquare_times(t010, b1, tmp10, (uint32_t)5U);
+  fsquare_times(t010, b1, tmp10, 5U);
   fmul0(b1, t010, b1, tmp);
-  uint64_t *b10 = t1 + (uint32_t)4U;
-  uint64_t *c10 = t1 + (uint32_t)8U;
-  uint64_t *t011 = t1 + (uint32_t)12U;
+  uint64_t *b10 = t1 + 4U;
+  uint64_t *c10 = t1 + 8U;
+  uint64_t *t011 = t1 + 12U;
   uint64_t *tmp11 = tmp;
-  fsquare_times(t011, b10, tmp11, (uint32_t)10U);
+  fsquare_times(t011, b10, tmp11, 10U);
   fmul0(c10, t011, b10, tmp);
-  fsquare_times(t011, c10, tmp11, (uint32_t)20U);
+  fsquare_times(t011, c10, tmp11, 20U);
   fmul0(t011, t011, c10, tmp);
-  fsquare_times(t011, t011, tmp11, (uint32_t)10U);
+  fsquare_times(t011, t011, tmp11, 10U);
   fmul0(b10, t011, b10, tmp);
-  fsquare_times(t011, b10, tmp11, (uint32_t)50U);
+  fsquare_times(t011, b10, tmp11, 50U);
   fmul0(c10, t011, b10, tmp);
-  uint64_t *b11 = t1 + (uint32_t)4U;
-  uint64_t *c1 = t1 + (uint32_t)8U;
-  uint64_t *t01 = t1 + (uint32_t)12U;
+  uint64_t *b11 = t1 + 4U;
+  uint64_t *c1 = t1 + 8U;
+  uint64_t *t01 = t1 + 12U;
   uint64_t *tmp1 = tmp;
-  fsquare_times(t01, c1, tmp1, (uint32_t)100U);
+  fsquare_times(t01, c1, tmp1, 100U);
   fmul0(t01, t01, c1, tmp);
-  fsquare_times(t01, t01, tmp1, (uint32_t)50U);
+  fsquare_times(t01, t01, tmp1, 50U);
   fmul0(t01, t01, b11, tmp);
-  fsquare_times(t01, t01, tmp1, (uint32_t)5U);
+  fsquare_times(t01, t01, tmp1, 5U);
   uint64_t *a = t1;
-  uint64_t *t0 = t1 + (uint32_t)12U;
+  uint64_t *t0 = t1 + 12U;
   fmul0(o, t0, a, tmp);
 }
 
 static void store_felem(uint64_t *b, uint64_t *f)
 {
   uint64_t f30 = f[3U];
-  uint64_t top_bit0 = f30 >> (uint32_t)63U;
-  f[3U] = f30 & (uint64_t)0x7fffffffffffffffU;
-  add_scalar0(f, f, (uint64_t)19U * top_bit0);
+  uint64_t top_bit0 = f30 >> 63U;
+  f[3U] = f30 & 0x7fffffffffffffffULL;
+  add_scalar0(f, f, 19ULL * top_bit0);
   uint64_t f31 = f[3U];
-  uint64_t top_bit = f31 >> (uint32_t)63U;
-  f[3U] = f31 & (uint64_t)0x7fffffffffffffffU;
-  add_scalar0(f, f, (uint64_t)19U * top_bit);
+  uint64_t top_bit = f31 >> 63U;
+  f[3U] = f31 & 0x7fffffffffffffffULL;
+  add_scalar0(f, f, 19ULL * top_bit);
   uint64_t f0 = f[0U];
   uint64_t f1 = f[1U];
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
-  uint64_t m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0xffffffffffffffedU);
-  uint64_t m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0xffffffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0xffffffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7fffffffffffffffU);
+  uint64_t m0 = FStar_UInt64_gte_mask(f0, 0xffffffffffffffedULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(f1, 0xffffffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(f2, 0xffffffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(f3, 0x7fffffffffffffffULL);
   uint64_t mask = ((m0 & m1) & m2) & m3;
-  uint64_t f0_ = f0 - (mask & (uint64_t)0xffffffffffffffedU);
-  uint64_t f1_ = f1 - (mask & (uint64_t)0xffffffffffffffffU);
-  uint64_t f2_ = f2 - (mask & (uint64_t)0xffffffffffffffffU);
-  uint64_t f3_ = f3 - (mask & (uint64_t)0x7fffffffffffffffU);
+  uint64_t f0_ = f0 - (mask & 0xffffffffffffffedULL);
+  uint64_t f1_ = f1 - (mask & 0xffffffffffffffffULL);
+  uint64_t f2_ = f2 - (mask & 0xffffffffffffffffULL);
+  uint64_t f3_ = f3 - (mask & 0x7fffffffffffffffULL);
   uint64_t o0 = f0_;
   uint64_t o1 = f1_;
   uint64_t o2 = f2_;
@@ -319,18 +314,14 @@ static void store_felem(uint64_t *b, uint64_t *f)
 static void encode_point(uint8_t *o, uint64_t *i)
 {
   uint64_t *x = i;
-  uint64_t *z = i + (uint32_t)4U;
+  uint64_t *z = i + 4U;
   uint64_t tmp[4U] = { 0U };
   uint64_t u64s[4U] = { 0U };
   uint64_t tmp_w[16U] = { 0U };
   finv(tmp, z, tmp_w);
   fmul0(tmp, tmp, x, tmp_w);
   store_felem(u64s, tmp);
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(o + i0 * (uint32_t)8U, u64s[i0]););
+  KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, store64_le(o + i0 * 8U, u64s[i0]););
 }
 
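[Editorial note] The final reduction in `store_felem` above is branchless: `mask` is all ones exactly when the almost-reduced limbs already encode a value at least p = 2^255 - 19, and the prime is then subtracted limb-wise through the mask. A minimal self-contained sketch of that select-and-subtract step, with a simple stand-in for the FStar_UInt64_gte_mask/eq_mask helpers (the stand-in relies on the compiler emitting branch-free comparisons, an assumption the real helpers avoid):

    #include <stdint.h>

    /* Stand-in mask helper: all ones when cond is true, zero otherwise. */
    static uint64_t mask_of(int cond) { return 0ULL - (uint64_t)(cond != 0); }

    /* Conditionally subtract p = 2^255 - 19 once, exactly as store_felem
     * does after folding the top bit of f[3]. */
    static void reduce_once_sketch(uint64_t f[4])
    {
      uint64_t m0 = mask_of(f[0] >= 0xffffffffffffffedULL);
      uint64_t m1 = mask_of(f[1] == 0xffffffffffffffffULL);
      uint64_t m2 = mask_of(f[2] == 0xffffffffffffffffULL);
      uint64_t m3 = mask_of(f[3] == 0x7fffffffffffffffULL);
      uint64_t mask = ((m0 & m1) & m2) & m3;   /* all ones iff f >= p */
      f[0] -= mask & 0xffffffffffffffedULL;
      f[1] -= mask & 0xffffffffffffffffULL;
      f[2] -= mask & 0x7fffffffffffffffULL + 0x8000000000000000ULL; /* 0xffffffffffffffff */
      f[3] -= mask & 0x7fffffffffffffffULL;
    }

The limb constants are the little-endian limbs of p, so the subtraction fires only on the canonical boundary case and leaves already-reduced values untouched.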
 /**
@@ -345,23 +336,23 @@ void Hacl_Curve25519_64_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
   uint64_t init[8U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = tmp;
-    uint8_t *bj = pub + i * (uint32_t)8U;
+    uint8_t *bj = pub + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t tmp3 = tmp[3U];
-  tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU;
+  tmp[3U] = tmp3 & 0x7fffffffffffffffULL;
   uint64_t *x = init;
-  uint64_t *z = init + (uint32_t)4U;
-  z[0U] = (uint64_t)1U;
-  z[1U] = (uint64_t)0U;
-  z[2U] = (uint64_t)0U;
-  z[3U] = (uint64_t)0U;
+  uint64_t *z = init + 4U;
+  z[0U] = 1ULL;
+  z[1U] = 0ULL;
+  z[2U] = 0ULL;
+  z[3U] = 0ULL;
   x[0U] = tmp[0U];
   x[1U] = tmp[1U];
   x[2U] = tmp[2U];
@@ -381,7 +372,7 @@ This computes a scalar multiplication of the secret/private key with the curve's
 void Hacl_Curve25519_64_secret_to_public(uint8_t *pub, uint8_t *priv)
 {
   uint8_t basepoint[32U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = basepoint;
     uint8_t x = g25519[i];
@@ -401,14 +392,14 @@ bool Hacl_Curve25519_64_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
 {
   uint8_t zeros[32U] = { 0U };
   Hacl_Curve25519_64_scalarmult(out, priv, pub);
-  uint8_t res = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint8_t res = 255U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint8_t z = res;
-  bool r = z == (uint8_t)255U;
+  bool r = z == 255U;
   return !r;
 }
 
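[Editorial note] `Hacl_Curve25519_64_ecdh` above rejects an all-zero shared secret without branching on secret data: `res` stays 0xFF only if every output byte equals the corresponding zero byte, and the function then returns false. A self-contained sketch of the same accumulation, with a byte equality mask standing in for `FStar_UInt8_eq_mask` (assumed here, not taken from the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* 0xFF when a == b, 0x00 otherwise, without a data-dependent branch. */
    static uint8_t eq_mask_u8(uint8_t a, uint8_t b)
    {
      uint8_t x = (uint8_t)(a ^ b);
      uint8_t nz = (uint8_t)((x | (uint8_t)(0U - x)) >> 7); /* 1 iff a != b */
      return (uint8_t)(nz - 1U);                            /* 0xFF iff a == b */
    }

    /* true iff the 32-byte shared secret is nonzero, mirroring ecdh's !r. */
    static bool shared_secret_ok(const uint8_t out[32])
    {
      uint8_t res = 255U;
      for (uint32_t i = 0U; i < 32U; i++)
        res = (uint8_t)(eq_mask_u8(out[i], 0U) & res);
      return !(res == 255U);
    }
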
diff --git a/src/Hacl_EC_Ed25519.c b/src/Hacl_EC_Ed25519.c
index 46f2837b..6ab24a33 100644
--- a/src/Hacl_EC_Ed25519.c
+++ b/src/Hacl_EC_Ed25519.c
@@ -43,11 +43,11 @@ Write the additive identity in `f`.
 */
 void Hacl_EC_Ed25519_mk_felem_zero(uint64_t *b)
 {
-  b[0U] = (uint64_t)0U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 0ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
 }
 
 /**
@@ -57,11 +57,11 @@ Write the multiplicative identity in `f`.
 */
 void Hacl_EC_Ed25519_mk_felem_one(uint64_t *b)
 {
-  b[0U] = (uint64_t)1U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 1ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
 }
 
 /**
@@ -106,8 +106,8 @@ Write `a * b mod p` in `out`.
 void Hacl_EC_Ed25519_felem_mul(uint64_t *a, uint64_t *b, uint64_t *out)
 {
   FStar_UInt128_uint128 tmp[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fmul(out, a, b, tmp);
 }
 
@@ -123,8 +123,8 @@ Write `a * a mod p` in `out`.
 void Hacl_EC_Ed25519_felem_sqr(uint64_t *a, uint64_t *out)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fsqr(out, a, tmp);
 }
 
@@ -205,29 +205,29 @@ Write the base point (generator) in `p`.
 void Hacl_EC_Ed25519_mk_base_point(uint64_t *p)
 {
   uint64_t *gx = p;
-  uint64_t *gy = p + (uint32_t)5U;
-  uint64_t *gz = p + (uint32_t)10U;
-  uint64_t *gt = p + (uint32_t)15U;
-  gx[0U] = (uint64_t)0x00062d608f25d51aU;
-  gx[1U] = (uint64_t)0x000412a4b4f6592aU;
-  gx[2U] = (uint64_t)0x00075b7171a4b31dU;
-  gx[3U] = (uint64_t)0x0001ff60527118feU;
-  gx[4U] = (uint64_t)0x000216936d3cd6e5U;
-  gy[0U] = (uint64_t)0x0006666666666658U;
-  gy[1U] = (uint64_t)0x0004ccccccccccccU;
-  gy[2U] = (uint64_t)0x0001999999999999U;
-  gy[3U] = (uint64_t)0x0003333333333333U;
-  gy[4U] = (uint64_t)0x0006666666666666U;
-  gz[0U] = (uint64_t)1U;
-  gz[1U] = (uint64_t)0U;
-  gz[2U] = (uint64_t)0U;
-  gz[3U] = (uint64_t)0U;
-  gz[4U] = (uint64_t)0U;
-  gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
-  gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
-  gt[2U] = (uint64_t)0x0002af8df483c27eU;
-  gt[3U] = (uint64_t)0x000332b375274732U;
-  gt[4U] = (uint64_t)0x00067875f0fd78b7U;
+  uint64_t *gy = p + 5U;
+  uint64_t *gz = p + 10U;
+  uint64_t *gt = p + 15U;
+  gx[0U] = 0x00062d608f25d51aULL;
+  gx[1U] = 0x000412a4b4f6592aULL;
+  gx[2U] = 0x00075b7171a4b31dULL;
+  gx[3U] = 0x0001ff60527118feULL;
+  gx[4U] = 0x000216936d3cd6e5ULL;
+  gy[0U] = 0x0006666666666658ULL;
+  gy[1U] = 0x0004ccccccccccccULL;
+  gy[2U] = 0x0001999999999999ULL;
+  gy[3U] = 0x0003333333333333ULL;
+  gy[4U] = 0x0006666666666666ULL;
+  gz[0U] = 1ULL;
+  gz[1U] = 0ULL;
+  gz[2U] = 0ULL;
+  gz[3U] = 0ULL;
+  gz[4U] = 0ULL;
+  gt[0U] = 0x00068ab3a5b7dda3ULL;
+  gt[1U] = 0x00000eea2a5eadbbULL;
+  gt[2U] = 0x0002af8df483c27eULL;
+  gt[3U] = 0x000332b375274732ULL;
+  gt[4U] = 0x00067875f0fd78b7ULL;
 }
 
 /**
diff --git a/src/Hacl_EC_K256.c b/src/Hacl_EC_K256.c
index e48edb5b..581c223b 100644
--- a/src/Hacl_EC_K256.c
+++ b/src/Hacl_EC_K256.c
@@ -43,7 +43,7 @@ Write the additive identity in `f`.
 */
 void Hacl_EC_K256_mk_felem_zero(uint64_t *f)
 {
-  memset(f, 0U, (uint32_t)5U * sizeof (uint64_t));
+  memset(f, 0U, 5U * sizeof (uint64_t));
 }
 
 /**
@@ -53,8 +53,8 @@ Write the multiplicative identity in `f`.
 */
 void Hacl_EC_K256_mk_felem_one(uint64_t *f)
 {
-  memset(f, 0U, (uint32_t)5U * sizeof (uint64_t));
-  f[0U] = (uint64_t)1U;
+  memset(f, 0U, 5U * sizeof (uint64_t));
+  f[0U] = 1ULL;
 }
 
 /**
@@ -83,7 +83,7 @@ Write `a - b mod p` in `out`.
 */
 void Hacl_EC_K256_felem_sub(uint64_t *a, uint64_t *b, uint64_t *out)
 {
-  Hacl_K256_Field_fsub(out, a, b, (uint64_t)2U);
+  Hacl_K256_Field_fsub(out, a, b, 2ULL);
   Hacl_K256_Field_fnormalize_weak(out, out);
 }
 
@@ -189,20 +189,20 @@ Write the base point (generator) in `p`.
 void Hacl_EC_K256_mk_base_point(uint64_t *p)
 {
   uint64_t *gx = p;
-  uint64_t *gy = p + (uint32_t)5U;
-  uint64_t *gz = p + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = p + 5U;
+  uint64_t *gz = p + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
 }
 
 /**
@@ -264,11 +264,11 @@ void Hacl_EC_K256_point_mul(uint8_t *scalar, uint64_t *p, uint64_t *out)
 {
   uint64_t scalar_q[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = scalar_q;
-    uint64_t u = load64_be(scalar + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(scalar + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
   Hacl_Impl_K256_PointMul_point_mul(out, scalar_q, p);
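[Editorial note] The scalar load rewritten above turns a 32-byte big-endian scalar into four little-endian 64-bit limbs: limb i comes from bytes (4 - i - 1) * 8 onward. A small sketch of the same indexing, with an explicit big-endian decoder standing in for the krml `load64_be` helper (not reproduced in this patch):

    #include <stdint.h>

    /* Stand-in for load64_be: read 8 bytes as a big-endian 64-bit integer. */
    static uint64_t be64_decode(const uint8_t *b)
    {
      uint64_t r = 0ULL;
      for (uint32_t j = 0U; j < 8U; j++)
        r = (r << 8U) | (uint64_t)b[j];
      return r;
    }

    /* Mirror of the loop above: scalar_q[0] is the least significant limb,
     * taken from the last 8 bytes of the big-endian scalar. */
    static void load_scalar_sketch(uint64_t scalar_q[4], const uint8_t scalar[32])
    {
      for (uint32_t i = 0U; i < 4U; i++)
        scalar_q[i] = be64_decode(scalar + (4U - i - 1U) * 8U);
    }
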
@@ -307,20 +307,20 @@ void Hacl_EC_K256_point_load(uint8_t *b, uint64_t *out)
 {
   uint64_t p_aff[10U] = { 0U };
   uint64_t *px = p_aff;
-  uint64_t *py = p_aff + (uint32_t)5U;
+  uint64_t *py = p_aff + 5U;
   uint8_t *pxb = b;
-  uint8_t *pyb = b + (uint32_t)32U;
+  uint8_t *pyb = b + 32U;
   Hacl_K256_Field_load_felem(px, pxb);
   Hacl_K256_Field_load_felem(py, pyb);
   uint64_t *x = p_aff;
-  uint64_t *y = p_aff + (uint32_t)5U;
+  uint64_t *y = p_aff + 5U;
   uint64_t *x1 = out;
-  uint64_t *y1 = out + (uint32_t)5U;
-  uint64_t *z1 = out + (uint32_t)10U;
-  memcpy(x1, x, (uint32_t)5U * sizeof (uint64_t));
-  memcpy(y1, y, (uint32_t)5U * sizeof (uint64_t));
-  memset(z1, 0U, (uint32_t)5U * sizeof (uint64_t));
-  z1[0U] = (uint64_t)1U;
+  uint64_t *y1 = out + 5U;
+  uint64_t *z1 = out + 10U;
+  memcpy(x1, x, 5U * sizeof (uint64_t));
+  memcpy(y1, y, 5U * sizeof (uint64_t));
+  memset(z1, 0U, 5U * sizeof (uint64_t));
+  z1[0U] = 1ULL;
 }
 
 /**
diff --git a/src/Hacl_Ed25519.c b/src/Hacl_Ed25519.c
index f9881e91..05d96cd0 100644
--- a/src/Hacl_Ed25519.c
+++ b/src/Hacl_Ed25519.c
@@ -49,24 +49,24 @@ void Hacl_Bignum25519_reduce_513(uint64_t *a)
   uint64_t f2 = a[2U];
   uint64_t f3 = a[3U];
   uint64_t f4 = a[4U];
-  uint64_t l_ = f0 + (uint64_t)0U;
-  uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = l_ >> (uint32_t)51U;
+  uint64_t l_ = f0 + 0ULL;
+  uint64_t tmp0 = l_ & 0x7ffffffffffffULL;
+  uint64_t c0 = l_ >> 51U;
   uint64_t l_0 = f1 + c0;
-  uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = l_0 >> (uint32_t)51U;
+  uint64_t tmp1 = l_0 & 0x7ffffffffffffULL;
+  uint64_t c1 = l_0 >> 51U;
   uint64_t l_1 = f2 + c1;
-  uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = l_1 >> (uint32_t)51U;
+  uint64_t tmp2 = l_1 & 0x7ffffffffffffULL;
+  uint64_t c2 = l_1 >> 51U;
   uint64_t l_2 = f3 + c2;
-  uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = l_2 >> (uint32_t)51U;
+  uint64_t tmp3 = l_2 & 0x7ffffffffffffULL;
+  uint64_t c3 = l_2 >> 51U;
   uint64_t l_3 = f4 + c3;
-  uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = l_3 >> (uint32_t)51U;
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = l_3 & 0x7ffffffffffffULL;
+  uint64_t c4 = l_3 >> 51U;
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   a[0U] = tmp0_;
   a[1U] = tmp1 + c5;
   a[2U] = tmp2;
@@ -77,8 +77,8 @@ void Hacl_Bignum25519_reduce_513(uint64_t *a)
 static inline void fmul0(uint64_t *output, uint64_t *input, uint64_t *input2)
 {
   FStar_UInt128_uint128 tmp[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fmul(output, input, input2, tmp);
 }
 
@@ -89,11 +89,11 @@ static inline void times_2(uint64_t *out, uint64_t *a)
   uint64_t a2 = a[2U];
   uint64_t a3 = a[3U];
   uint64_t a4 = a[4U];
-  uint64_t o0 = (uint64_t)2U * a0;
-  uint64_t o1 = (uint64_t)2U * a1;
-  uint64_t o2 = (uint64_t)2U * a2;
-  uint64_t o3 = (uint64_t)2U * a3;
-  uint64_t o4 = (uint64_t)2U * a4;
+  uint64_t o0 = 2ULL * a0;
+  uint64_t o1 = 2ULL * a1;
+  uint64_t o2 = 2ULL * a2;
+  uint64_t o3 = 2ULL * a3;
+  uint64_t o4 = 2ULL * a4;
   out[0U] = o0;
   out[1U] = o1;
   out[2U] = o2;
@@ -104,54 +104,54 @@ static inline void times_2(uint64_t *out, uint64_t *a)
 static inline void times_d(uint64_t *out, uint64_t *a)
 {
   uint64_t d[5U] = { 0U };
-  d[0U] = (uint64_t)0x00034dca135978a3U;
-  d[1U] = (uint64_t)0x0001a8283b156ebdU;
-  d[2U] = (uint64_t)0x0005e7a26001c029U;
-  d[3U] = (uint64_t)0x000739c663a03cbbU;
-  d[4U] = (uint64_t)0x00052036cee2b6ffU;
+  d[0U] = 0x00034dca135978a3ULL;
+  d[1U] = 0x0001a8283b156ebdULL;
+  d[2U] = 0x0005e7a26001c029ULL;
+  d[3U] = 0x000739c663a03cbbULL;
+  d[4U] = 0x00052036cee2b6ffULL;
   fmul0(out, d, a);
 }
 
 static inline void times_2d(uint64_t *out, uint64_t *a)
 {
   uint64_t d2[5U] = { 0U };
-  d2[0U] = (uint64_t)0x00069b9426b2f159U;
-  d2[1U] = (uint64_t)0x00035050762add7aU;
-  d2[2U] = (uint64_t)0x0003cf44c0038052U;
-  d2[3U] = (uint64_t)0x0006738cc7407977U;
-  d2[4U] = (uint64_t)0x0002406d9dc56dffU;
+  d2[0U] = 0x00069b9426b2f159ULL;
+  d2[1U] = 0x00035050762add7aULL;
+  d2[2U] = 0x0003cf44c0038052ULL;
+  d2[3U] = 0x0006738cc7407977ULL;
+  d2[4U] = 0x0002406d9dc56dffULL;
   fmul0(out, d2, a);
 }
 
 static inline void fsquare(uint64_t *out, uint64_t *a)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fsqr(out, a, tmp);
 }
 
 static inline void fsquare_times(uint64_t *output, uint64_t *input, uint32_t count)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_fsquare_times(output, input, tmp, count);
 }
 
 static inline void fsquare_times_inplace(uint64_t *output, uint32_t count)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_fsquare_times(output, output, tmp, count);
 }
 
 void Hacl_Bignum25519_inverse(uint64_t *out, uint64_t *a)
 {
   FStar_UInt128_uint128 tmp[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_finv(out, a, tmp);
 }
 
@@ -162,40 +162,40 @@ static inline void reduce(uint64_t *out)
   uint64_t o2 = out[2U];
   uint64_t o3 = out[3U];
   uint64_t o4 = out[4U];
-  uint64_t l_ = o0 + (uint64_t)0U;
-  uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = l_ >> (uint32_t)51U;
+  uint64_t l_ = o0 + 0ULL;
+  uint64_t tmp0 = l_ & 0x7ffffffffffffULL;
+  uint64_t c0 = l_ >> 51U;
   uint64_t l_0 = o1 + c0;
-  uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = l_0 >> (uint32_t)51U;
+  uint64_t tmp1 = l_0 & 0x7ffffffffffffULL;
+  uint64_t c1 = l_0 >> 51U;
   uint64_t l_1 = o2 + c1;
-  uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = l_1 >> (uint32_t)51U;
+  uint64_t tmp2 = l_1 & 0x7ffffffffffffULL;
+  uint64_t c2 = l_1 >> 51U;
   uint64_t l_2 = o3 + c2;
-  uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = l_2 >> (uint32_t)51U;
+  uint64_t tmp3 = l_2 & 0x7ffffffffffffULL;
+  uint64_t c3 = l_2 >> 51U;
   uint64_t l_3 = o4 + c3;
-  uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = l_3 >> (uint32_t)51U;
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = l_3 & 0x7ffffffffffffULL;
+  uint64_t c4 = l_3 >> 51U;
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t f0 = tmp0_;
   uint64_t f1 = tmp1 + c5;
   uint64_t f2 = tmp2;
   uint64_t f3 = tmp3;
   uint64_t f4 = tmp4;
-  uint64_t m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0x7ffffffffffedU);
-  uint64_t m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0x7ffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0x7ffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7ffffffffffffU);
-  uint64_t m4 = FStar_UInt64_eq_mask(f4, (uint64_t)0x7ffffffffffffU);
+  uint64_t m0 = FStar_UInt64_gte_mask(f0, 0x7ffffffffffedULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(f1, 0x7ffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(f2, 0x7ffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(f3, 0x7ffffffffffffULL);
+  uint64_t m4 = FStar_UInt64_eq_mask(f4, 0x7ffffffffffffULL);
   uint64_t mask = (((m0 & m1) & m2) & m3) & m4;
-  uint64_t f0_ = f0 - (mask & (uint64_t)0x7ffffffffffedU);
-  uint64_t f1_ = f1 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f2_ = f2 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f3_ = f3 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f4_ = f4 - (mask & (uint64_t)0x7ffffffffffffU);
+  uint64_t f0_ = f0 - (mask & 0x7ffffffffffedULL);
+  uint64_t f1_ = f1 - (mask & 0x7ffffffffffffULL);
+  uint64_t f2_ = f2 - (mask & 0x7ffffffffffffULL);
+  uint64_t f3_ = f3 - (mask & 0x7ffffffffffffULL);
+  uint64_t f4_ = f4 - (mask & 0x7ffffffffffffULL);
   uint64_t f01 = f0_;
   uint64_t f11 = f1_;
   uint64_t f21 = f2_;
@@ -212,45 +212,41 @@ void Hacl_Bignum25519_load_51(uint64_t *output, uint8_t *input)
 {
   uint64_t u64s[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = u64s;
-    uint8_t *bj = input + i * (uint32_t)8U;
+    uint8_t *bj = input + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t u64s3 = u64s[3U];
-  u64s[3U] = u64s3 & (uint64_t)0x7fffffffffffffffU;
-  output[0U] = u64s[0U] & (uint64_t)0x7ffffffffffffU;
-  output[1U] = u64s[0U] >> (uint32_t)51U | (u64s[1U] & (uint64_t)0x3fffffffffU) << (uint32_t)13U;
-  output[2U] = u64s[1U] >> (uint32_t)38U | (u64s[2U] & (uint64_t)0x1ffffffU) << (uint32_t)26U;
-  output[3U] = u64s[2U] >> (uint32_t)25U | (u64s[3U] & (uint64_t)0xfffU) << (uint32_t)39U;
-  output[4U] = u64s[3U] >> (uint32_t)12U;
+  u64s[3U] = u64s3 & 0x7fffffffffffffffULL;
+  output[0U] = u64s[0U] & 0x7ffffffffffffULL;
+  output[1U] = u64s[0U] >> 51U | (u64s[1U] & 0x3fffffffffULL) << 13U;
+  output[2U] = u64s[1U] >> 38U | (u64s[2U] & 0x1ffffffULL) << 26U;
+  output[3U] = u64s[2U] >> 25U | (u64s[3U] & 0xfffULL) << 39U;
+  output[4U] = u64s[3U] >> 12U;
 }
 
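[Editorial note] `Hacl_Bignum25519_load_51` above splits a 255-bit little-endian value into five 51-bit limbs (5 x 51 = 255). For reference, the inverse repacking looks as follows; it is an editorial sketch valid only under the assumption that every limb is already reduced below 2^51, and it is not `Hacl_Impl_Curve25519_Field51_store_felem`, which additionally performs a final modular reduction:

    #include <stdint.h>

    /* Repack five reduced 51-bit limbs (each < 2^51) into the four 64-bit
     * little-endian words that load_51 above would split them back into. */
    static void repack_51_sketch(uint64_t u64s[4], const uint64_t f[5])
    {
      u64s[0] = f[0] | (f[1] << 51U);
      u64s[1] = (f[1] >> 13U) | (f[2] << 38U);
      u64s[2] = (f[2] >> 26U) | (f[3] << 25U);
      u64s[3] = (f[3] >> 39U) | (f[4] << 12U);
    }

The shift amounts are exactly the complements of those in load_51, which is a quick way to check the two directions against each other.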
 void Hacl_Bignum25519_store_51(uint8_t *output, uint64_t *input)
 {
   uint64_t u64s[4U] = { 0U };
   Hacl_Impl_Curve25519_Field51_store_felem(u64s, input);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(output + i * (uint32_t)8U, u64s[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(output + i * 8U, u64s[i]););
 }
 
 void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
 {
   uint64_t tmp[20U] = { 0U };
   uint64_t *tmp1 = tmp;
-  uint64_t *tmp20 = tmp + (uint32_t)5U;
-  uint64_t *tmp30 = tmp + (uint32_t)10U;
-  uint64_t *tmp40 = tmp + (uint32_t)15U;
+  uint64_t *tmp20 = tmp + 5U;
+  uint64_t *tmp30 = tmp + 10U;
+  uint64_t *tmp40 = tmp + 15U;
   uint64_t *x10 = p;
-  uint64_t *y10 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y10 = p + 5U;
+  uint64_t *z1 = p + 10U;
   fsquare(tmp1, x10);
   fsquare(tmp20, y10);
   fsum(tmp30, tmp1, tmp20);
@@ -258,11 +254,11 @@ void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
   fsquare(tmp1, z1);
   times_2(tmp1, tmp1);
   uint64_t *tmp10 = tmp;
-  uint64_t *tmp2 = tmp + (uint32_t)5U;
-  uint64_t *tmp3 = tmp + (uint32_t)10U;
-  uint64_t *tmp4 = tmp + (uint32_t)15U;
+  uint64_t *tmp2 = tmp + 5U;
+  uint64_t *tmp3 = tmp + 10U;
+  uint64_t *tmp4 = tmp + 15U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
+  uint64_t *y1 = p + 5U;
   fsum(tmp2, x1, y1);
   fsquare(tmp2, tmp2);
   Hacl_Bignum25519_reduce_513(tmp3);
@@ -271,13 +267,13 @@ void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
   Hacl_Bignum25519_reduce_513(tmp4);
   fsum(tmp10, tmp10, tmp4);
   uint64_t *tmp_f = tmp;
-  uint64_t *tmp_e = tmp + (uint32_t)5U;
-  uint64_t *tmp_h = tmp + (uint32_t)10U;
-  uint64_t *tmp_g = tmp + (uint32_t)15U;
+  uint64_t *tmp_e = tmp + 5U;
+  uint64_t *tmp_h = tmp + 10U;
+  uint64_t *tmp_g = tmp + 15U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
-  uint64_t *t3 = out + (uint32_t)15U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
+  uint64_t *t3 = out + 15U;
   fmul0(x3, tmp_e, tmp_f);
   fmul0(y3, tmp_g, tmp_h);
   fmul0(t3, tmp_e, tmp_h);
@@ -288,13 +284,13 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
 {
   uint64_t tmp[30U] = { 0U };
   uint64_t *tmp1 = tmp;
-  uint64_t *tmp20 = tmp + (uint32_t)5U;
-  uint64_t *tmp30 = tmp + (uint32_t)10U;
-  uint64_t *tmp40 = tmp + (uint32_t)15U;
+  uint64_t *tmp20 = tmp + 5U;
+  uint64_t *tmp30 = tmp + 10U;
+  uint64_t *tmp40 = tmp + 15U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
+  uint64_t *y1 = p + 5U;
   uint64_t *x2 = q;
-  uint64_t *y2 = q + (uint32_t)5U;
+  uint64_t *y2 = q + 5U;
   fdifference(tmp1, y1, x1);
   fdifference(tmp20, y2, x2);
   fmul0(tmp30, tmp1, tmp20);
@@ -302,15 +298,15 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
   fsum(tmp20, y2, x2);
   fmul0(tmp40, tmp1, tmp20);
   uint64_t *tmp10 = tmp;
-  uint64_t *tmp2 = tmp + (uint32_t)5U;
-  uint64_t *tmp3 = tmp + (uint32_t)10U;
-  uint64_t *tmp4 = tmp + (uint32_t)15U;
-  uint64_t *tmp5 = tmp + (uint32_t)20U;
-  uint64_t *tmp6 = tmp + (uint32_t)25U;
-  uint64_t *z1 = p + (uint32_t)10U;
-  uint64_t *t1 = p + (uint32_t)15U;
-  uint64_t *z2 = q + (uint32_t)10U;
-  uint64_t *t2 = q + (uint32_t)15U;
+  uint64_t *tmp2 = tmp + 5U;
+  uint64_t *tmp3 = tmp + 10U;
+  uint64_t *tmp4 = tmp + 15U;
+  uint64_t *tmp5 = tmp + 20U;
+  uint64_t *tmp6 = tmp + 25U;
+  uint64_t *z1 = p + 10U;
+  uint64_t *t1 = p + 15U;
+  uint64_t *z2 = q + 10U;
+  uint64_t *t2 = q + 15U;
   times_2d(tmp10, t1);
   fmul0(tmp10, tmp10, t2);
   times_2(tmp2, z1);
@@ -320,13 +316,13 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
   fsum(tmp10, tmp2, tmp10);
   fsum(tmp2, tmp4, tmp3);
   uint64_t *tmp_g = tmp;
-  uint64_t *tmp_h = tmp + (uint32_t)5U;
-  uint64_t *tmp_e = tmp + (uint32_t)20U;
-  uint64_t *tmp_f = tmp + (uint32_t)25U;
+  uint64_t *tmp_h = tmp + 5U;
+  uint64_t *tmp_e = tmp + 20U;
+  uint64_t *tmp_f = tmp + 25U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
-  uint64_t *t3 = out + (uint32_t)15U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
+  uint64_t *t3 = out + 15U;
   fmul0(x3, tmp_e, tmp_f);
   fmul0(y3, tmp_g, tmp_h);
   fmul0(t3, tmp_e, tmp_h);
@@ -336,64 +332,64 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
 void Hacl_Impl_Ed25519_PointConstants_make_point_inf(uint64_t *b)
 {
   uint64_t *x = b;
-  uint64_t *y = b + (uint32_t)5U;
-  uint64_t *z = b + (uint32_t)10U;
-  uint64_t *t = b + (uint32_t)15U;
-  x[0U] = (uint64_t)0U;
-  x[1U] = (uint64_t)0U;
-  x[2U] = (uint64_t)0U;
-  x[3U] = (uint64_t)0U;
-  x[4U] = (uint64_t)0U;
-  y[0U] = (uint64_t)1U;
-  y[1U] = (uint64_t)0U;
-  y[2U] = (uint64_t)0U;
-  y[3U] = (uint64_t)0U;
-  y[4U] = (uint64_t)0U;
-  z[0U] = (uint64_t)1U;
-  z[1U] = (uint64_t)0U;
-  z[2U] = (uint64_t)0U;
-  z[3U] = (uint64_t)0U;
-  z[4U] = (uint64_t)0U;
-  t[0U] = (uint64_t)0U;
-  t[1U] = (uint64_t)0U;
-  t[2U] = (uint64_t)0U;
-  t[3U] = (uint64_t)0U;
-  t[4U] = (uint64_t)0U;
+  uint64_t *y = b + 5U;
+  uint64_t *z = b + 10U;
+  uint64_t *t = b + 15U;
+  x[0U] = 0ULL;
+  x[1U] = 0ULL;
+  x[2U] = 0ULL;
+  x[3U] = 0ULL;
+  x[4U] = 0ULL;
+  y[0U] = 1ULL;
+  y[1U] = 0ULL;
+  y[2U] = 0ULL;
+  y[3U] = 0ULL;
+  y[4U] = 0ULL;
+  z[0U] = 1ULL;
+  z[1U] = 0ULL;
+  z[2U] = 0ULL;
+  z[3U] = 0ULL;
+  z[4U] = 0ULL;
+  t[0U] = 0ULL;
+  t[1U] = 0ULL;
+  t[2U] = 0ULL;
+  t[3U] = 0ULL;
+  t[4U] = 0ULL;
 }
 
 static inline void pow2_252m2(uint64_t *out, uint64_t *z)
 {
   uint64_t buf[20U] = { 0U };
   uint64_t *a = buf;
-  uint64_t *t00 = buf + (uint32_t)5U;
-  uint64_t *b0 = buf + (uint32_t)10U;
-  uint64_t *c0 = buf + (uint32_t)15U;
-  fsquare_times(a, z, (uint32_t)1U);
-  fsquare_times(t00, a, (uint32_t)2U);
+  uint64_t *t00 = buf + 5U;
+  uint64_t *b0 = buf + 10U;
+  uint64_t *c0 = buf + 15U;
+  fsquare_times(a, z, 1U);
+  fsquare_times(t00, a, 2U);
   fmul0(b0, t00, z);
   fmul0(a, b0, a);
-  fsquare_times(t00, a, (uint32_t)1U);
+  fsquare_times(t00, a, 1U);
   fmul0(b0, t00, b0);
-  fsquare_times(t00, b0, (uint32_t)5U);
+  fsquare_times(t00, b0, 5U);
   fmul0(b0, t00, b0);
-  fsquare_times(t00, b0, (uint32_t)10U);
+  fsquare_times(t00, b0, 10U);
   fmul0(c0, t00, b0);
-  fsquare_times(t00, c0, (uint32_t)20U);
+  fsquare_times(t00, c0, 20U);
   fmul0(t00, t00, c0);
-  fsquare_times_inplace(t00, (uint32_t)10U);
+  fsquare_times_inplace(t00, 10U);
   fmul0(b0, t00, b0);
-  fsquare_times(t00, b0, (uint32_t)50U);
+  fsquare_times(t00, b0, 50U);
   uint64_t *a0 = buf;
-  uint64_t *t0 = buf + (uint32_t)5U;
-  uint64_t *b = buf + (uint32_t)10U;
-  uint64_t *c = buf + (uint32_t)15U;
-  fsquare_times(a0, z, (uint32_t)1U);
+  uint64_t *t0 = buf + 5U;
+  uint64_t *b = buf + 10U;
+  uint64_t *c = buf + 15U;
+  fsquare_times(a0, z, 1U);
   fmul0(c, t0, b);
-  fsquare_times(t0, c, (uint32_t)100U);
+  fsquare_times(t0, c, 100U);
   fmul0(t0, t0, c);
-  fsquare_times_inplace(t0, (uint32_t)50U);
+  fsquare_times_inplace(t0, 50U);
   fmul0(t0, t0, b);
-  fsquare_times_inplace(t0, (uint32_t)2U);
+  fsquare_times_inplace(t0, 2U);
   fmul0(out, t0, a0);
 }
 
@@ -404,23 +400,17 @@ static inline bool is_0(uint64_t *x)
   uint64_t x2 = x[2U];
   uint64_t x3 = x[3U];
   uint64_t x4 = x[4U];
-  return
-    x0
-    == (uint64_t)0U
-    && x1 == (uint64_t)0U
-    && x2 == (uint64_t)0U
-    && x3 == (uint64_t)0U
-    && x4 == (uint64_t)0U;
+  return x0 == 0ULL && x1 == 0ULL && x2 == 0ULL && x3 == 0ULL && x4 == 0ULL;
 }
 
 static inline void mul_modp_sqrt_m1(uint64_t *x)
 {
   uint64_t sqrt_m1[5U] = { 0U };
-  sqrt_m1[0U] = (uint64_t)0x00061b274a0ea0b0U;
-  sqrt_m1[1U] = (uint64_t)0x0000d5a5fc8f189dU;
-  sqrt_m1[2U] = (uint64_t)0x0007ef5e9cbd0c60U;
-  sqrt_m1[3U] = (uint64_t)0x00078595a6804c9eU;
-  sqrt_m1[4U] = (uint64_t)0x0002b8324804fc1dU;
+  sqrt_m1[0U] = 0x00061b274a0ea0b0ULL;
+  sqrt_m1[1U] = 0x0000d5a5fc8f189dULL;
+  sqrt_m1[2U] = 0x0007ef5e9cbd0c60ULL;
+  sqrt_m1[3U] = 0x00078595a6804c9eULL;
+  sqrt_m1[4U] = 0x0002b8324804fc1dULL;
   fmul0(x, x, sqrt_m1);
 }
 
@@ -436,11 +426,11 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
   bool
   b =
     x00
-    >= (uint64_t)0x7ffffffffffedU
-    && x1 == (uint64_t)0x7ffffffffffffU
-    && x21 == (uint64_t)0x7ffffffffffffU
-    && x30 == (uint64_t)0x7ffffffffffffU
-    && x4 == (uint64_t)0x7ffffffffffffU;
+    >= 0x7ffffffffffedULL
+    && x1 == 0x7ffffffffffffULL
+    && x21 == 0x7ffffffffffffULL
+    && x30 == 0x7ffffffffffffULL
+    && x4 == 0x7ffffffffffffULL;
   bool res;
   if (b)
   {
@@ -450,14 +440,14 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
   {
     uint64_t tmp1[20U] = { 0U };
     uint64_t *one = tmp1;
-    uint64_t *y2 = tmp1 + (uint32_t)5U;
-    uint64_t *dyyi = tmp1 + (uint32_t)10U;
-    uint64_t *dyy = tmp1 + (uint32_t)15U;
-    one[0U] = (uint64_t)1U;
-    one[1U] = (uint64_t)0U;
-    one[2U] = (uint64_t)0U;
-    one[3U] = (uint64_t)0U;
-    one[4U] = (uint64_t)0U;
+    uint64_t *y2 = tmp1 + 5U;
+    uint64_t *dyyi = tmp1 + 10U;
+    uint64_t *dyy = tmp1 + 15U;
+    one[0U] = 1ULL;
+    one[1U] = 0ULL;
+    one[2U] = 0ULL;
+    one[3U] = 0ULL;
+    one[4U] = 0ULL;
     fsquare(y2, y);
     times_d(dyy, y2);
     fsum(dyy, dyy, one);
@@ -470,37 +460,37 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
     uint8_t z;
     if (x2_is_0)
     {
-      if (sign == (uint64_t)0U)
+      if (sign == 0ULL)
       {
-        x[0U] = (uint64_t)0U;
-        x[1U] = (uint64_t)0U;
-        x[2U] = (uint64_t)0U;
-        x[3U] = (uint64_t)0U;
-        x[4U] = (uint64_t)0U;
-        z = (uint8_t)1U;
+        x[0U] = 0ULL;
+        x[1U] = 0ULL;
+        x[2U] = 0ULL;
+        x[3U] = 0ULL;
+        x[4U] = 0ULL;
+        z = 1U;
       }
       else
       {
-        z = (uint8_t)0U;
+        z = 0U;
       }
     }
     else
     {
-      z = (uint8_t)2U;
+      z = 2U;
     }
-    if (z == (uint8_t)0U)
+    if (z == 0U)
     {
       res = false;
     }
-    else if (z == (uint8_t)1U)
+    else if (z == 1U)
     {
       res = true;
     }
     else
     {
       uint64_t *x210 = tmp;
-      uint64_t *x31 = tmp + (uint32_t)5U;
-      uint64_t *t00 = tmp + (uint32_t)10U;
+      uint64_t *x31 = tmp + 5U;
+      uint64_t *t00 = tmp + 10U;
       pow2_252m2(x31, x210);
       fsquare(t00, x31);
       fdifference(t00, t00, x210);
@@ -512,8 +502,8 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
         mul_modp_sqrt_m1(x31);
       }
       uint64_t *x211 = tmp;
-      uint64_t *x3 = tmp + (uint32_t)5U;
-      uint64_t *t01 = tmp + (uint32_t)10U;
+      uint64_t *x3 = tmp + 5U;
+      uint64_t *t01 = tmp + 10U;
       fsquare(t01, x3);
       fdifference(t01, t01, x211);
       Hacl_Bignum25519_reduce_513(t01);
@@ -525,23 +515,23 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
       }
       else
       {
-        uint64_t *x32 = tmp + (uint32_t)5U;
-        uint64_t *t0 = tmp + (uint32_t)10U;
+        uint64_t *x32 = tmp + 5U;
+        uint64_t *t0 = tmp + 10U;
         reduce(x32);
         uint64_t x0 = x32[0U];
-        uint64_t x01 = x0 & (uint64_t)1U;
+        uint64_t x01 = x0 & 1ULL;
         if (!(x01 == sign))
         {
-          t0[0U] = (uint64_t)0U;
-          t0[1U] = (uint64_t)0U;
-          t0[2U] = (uint64_t)0U;
-          t0[3U] = (uint64_t)0U;
-          t0[4U] = (uint64_t)0U;
+          t0[0U] = 0ULL;
+          t0[1U] = 0ULL;
+          t0[2U] = 0ULL;
+          t0[3U] = 0ULL;
+          t0[4U] = 0ULL;
           fdifference(x32, t0, x32);
           Hacl_Bignum25519_reduce_513(x32);
           reduce(x32);
         }
-        memcpy(x, x32, (uint32_t)5U * sizeof (uint64_t));
+        memcpy(x, x32, 5U * sizeof (uint64_t));
         res = true;
       }
     }
@@ -554,9 +544,9 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *
 {
   uint64_t tmp[10U] = { 0U };
   uint64_t *y = tmp;
-  uint64_t *x = tmp + (uint32_t)5U;
+  uint64_t *x = tmp + 5U;
   uint8_t s31 = s[31U];
-  uint8_t z = s31 >> (uint32_t)7U;
+  uint8_t z = (uint32_t)s31 >> 7U;
   uint64_t sign = (uint64_t)z;
   Hacl_Bignum25519_load_51(y, s);
   bool z0 = recover_x(x, y, sign);
@@ -568,16 +558,16 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *
   else
   {
     uint64_t *outx = out;
-    uint64_t *outy = out + (uint32_t)5U;
-    uint64_t *outz = out + (uint32_t)10U;
-    uint64_t *outt = out + (uint32_t)15U;
-    memcpy(outx, x, (uint32_t)5U * sizeof (uint64_t));
-    memcpy(outy, y, (uint32_t)5U * sizeof (uint64_t));
-    outz[0U] = (uint64_t)1U;
-    outz[1U] = (uint64_t)0U;
-    outz[2U] = (uint64_t)0U;
-    outz[3U] = (uint64_t)0U;
-    outz[4U] = (uint64_t)0U;
+    uint64_t *outy = out + 5U;
+    uint64_t *outz = out + 10U;
+    uint64_t *outt = out + 15U;
+    memcpy(outx, x, 5U * sizeof (uint64_t));
+    memcpy(outy, y, 5U * sizeof (uint64_t));
+    outz[0U] = 1ULL;
+    outz[1U] = 0ULL;
+    outz[2U] = 0ULL;
+    outz[3U] = 0ULL;
+    outz[4U] = 0ULL;
     fmul0(outt, x, y);
     res = true;
   }
@@ -588,25 +578,25 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *
 void Hacl_Impl_Ed25519_PointCompress_point_compress(uint8_t *z, uint64_t *p)
 {
   uint64_t tmp[15U] = { 0U };
-  uint64_t *x = tmp + (uint32_t)5U;
-  uint64_t *out = tmp + (uint32_t)10U;
+  uint64_t *x = tmp + 5U;
+  uint64_t *out = tmp + 10U;
   uint64_t *zinv1 = tmp;
-  uint64_t *x1 = tmp + (uint32_t)5U;
-  uint64_t *out1 = tmp + (uint32_t)10U;
+  uint64_t *x1 = tmp + 5U;
+  uint64_t *out1 = tmp + 10U;
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
   Hacl_Bignum25519_inverse(zinv1, pz);
   fmul0(x1, px, zinv1);
   reduce(x1);
   fmul0(out1, py, zinv1);
   Hacl_Bignum25519_reduce_513(out1);
   uint64_t x0 = x[0U];
-  uint64_t b = x0 & (uint64_t)1U;
+  uint64_t b = x0 & 1ULL;
   Hacl_Bignum25519_store_51(z, out);
   uint8_t xbyte = (uint8_t)b;
   uint8_t o31 = z[31U];
-  z[31U] = o31 + (xbyte << (uint32_t)7U);
+  z[31U] = (uint32_t)o31 + ((uint32_t)xbyte << 7U);
 }
 
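[Editorial note] Compression above stores the reduced y-coordinate as 32 little-endian bytes and then folds the parity of x into the top bit of byte 31; decompression (earlier in this file) recovers that bit with `s[31] >> 7`. A compact sketch of the two sign-bit operations, assuming a 32-byte buffer that already holds the encoded y:

    #include <stdint.h>

    /* Set the sign bit: bit 7 of byte 31 carries the parity of x. The
     * addition mirrors the code above and relies on bit 7 of enc[31] being
     * clear, which holds because y was reduced below 2^255 before storing. */
    static void set_sign_bit(uint8_t enc[32], uint64_t x0)
    {
      uint8_t xbyte = (uint8_t)(x0 & 1ULL);
      enc[31U] = (uint8_t)((uint32_t)enc[31U] + ((uint32_t)xbyte << 7U));
    }

    /* Read it back, as point_decompress does before calling recover_x. */
    static uint64_t get_sign_bit(const uint8_t enc[32])
    {
      return (uint64_t)((uint32_t)enc[31U] >> 7U);
    }
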
 static inline void barrett_reduction(uint64_t *z, uint64_t *t)
@@ -621,40 +611,40 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   uint64_t t7 = t[7U];
   uint64_t t8 = t[8U];
   uint64_t t9 = t[9U];
-  uint64_t m00 = (uint64_t)0x12631a5cf5d3edU;
-  uint64_t m10 = (uint64_t)0xf9dea2f79cd658U;
-  uint64_t m20 = (uint64_t)0x000000000014deU;
-  uint64_t m30 = (uint64_t)0x00000000000000U;
-  uint64_t m40 = (uint64_t)0x00000010000000U;
+  uint64_t m00 = 0x12631a5cf5d3edULL;
+  uint64_t m10 = 0xf9dea2f79cd658ULL;
+  uint64_t m20 = 0x000000000014deULL;
+  uint64_t m30 = 0x00000000000000ULL;
+  uint64_t m40 = 0x00000010000000ULL;
   uint64_t m0 = m00;
   uint64_t m1 = m10;
   uint64_t m2 = m20;
   uint64_t m3 = m30;
   uint64_t m4 = m40;
-  uint64_t m010 = (uint64_t)0x9ce5a30a2c131bU;
-  uint64_t m110 = (uint64_t)0x215d086329a7edU;
-  uint64_t m210 = (uint64_t)0xffffffffeb2106U;
-  uint64_t m310 = (uint64_t)0xffffffffffffffU;
-  uint64_t m410 = (uint64_t)0x00000fffffffffU;
+  uint64_t m010 = 0x9ce5a30a2c131bULL;
+  uint64_t m110 = 0x215d086329a7edULL;
+  uint64_t m210 = 0xffffffffeb2106ULL;
+  uint64_t m310 = 0xffffffffffffffULL;
+  uint64_t m410 = 0x00000fffffffffULL;
   uint64_t mu0 = m010;
   uint64_t mu1 = m110;
   uint64_t mu2 = m210;
   uint64_t mu3 = m310;
   uint64_t mu4 = m410;
-  uint64_t y_ = (t5 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_ = t4 >> (uint32_t)24U;
+  uint64_t y_ = (t5 & 0xffffffULL) << 32U;
+  uint64_t x_ = t4 >> 24U;
   uint64_t z00 = x_ | y_;
-  uint64_t y_0 = (t6 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_0 = t5 >> (uint32_t)24U;
+  uint64_t y_0 = (t6 & 0xffffffULL) << 32U;
+  uint64_t x_0 = t5 >> 24U;
   uint64_t z10 = x_0 | y_0;
-  uint64_t y_1 = (t7 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_1 = t6 >> (uint32_t)24U;
+  uint64_t y_1 = (t7 & 0xffffffULL) << 32U;
+  uint64_t x_1 = t6 >> 24U;
   uint64_t z20 = x_1 | y_1;
-  uint64_t y_2 = (t8 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_2 = t7 >> (uint32_t)24U;
+  uint64_t y_2 = (t8 & 0xffffffULL) << 32U;
+  uint64_t x_2 = t7 >> 24U;
   uint64_t z30 = x_2 | y_2;
-  uint64_t y_3 = (t9 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_3 = t8 >> (uint32_t)24U;
+  uint64_t y_3 = (t9 & 0xffffffULL) << 32U;
+  uint64_t x_3 = t8 >> 24U;
   uint64_t z40 = x_3 | y_3;
   uint64_t q0 = z00;
   uint64_t q1 = z10;
@@ -707,55 +697,37 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   FStar_UInt128_uint128 z6 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy24, xy33), xy42);
   FStar_UInt128_uint128 z7 = FStar_UInt128_add_mod(xy34, xy43);
   FStar_UInt128_uint128 z8 = xy44;
-  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z01, (uint32_t)56U);
+  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z01, 56U);
   FStar_UInt128_uint128 c00 = carry0;
-  FStar_UInt128_uint128
-  carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z11, c00), (uint32_t)56U);
+  FStar_UInt128_uint128 carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z11, c00), 56U);
   FStar_UInt128_uint128 c10 = carry1;
-  FStar_UInt128_uint128
-  carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z21, c10), (uint32_t)56U);
+  FStar_UInt128_uint128 carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z21, c10), 56U);
   FStar_UInt128_uint128 c20 = carry2;
-  FStar_UInt128_uint128
-  carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z31, c20), (uint32_t)56U);
+  FStar_UInt128_uint128 carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z31, c20), 56U);
   FStar_UInt128_uint128 c30 = carry3;
-  FStar_UInt128_uint128
-  carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z41, c30), (uint32_t)56U);
+  FStar_UInt128_uint128 carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z41, c30), 56U);
   uint64_t
-  t100 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z41, c30))
-    & (uint64_t)0xffffffffffffffU;
+  t100 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z41, c30)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c40 = carry4;
   uint64_t t410 = t100;
-  FStar_UInt128_uint128
-  carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z5, c40), (uint32_t)56U);
+  FStar_UInt128_uint128 carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z5, c40), 56U);
   uint64_t
-  t101 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z5, c40))
-    & (uint64_t)0xffffffffffffffU;
+  t101 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z5, c40)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c5 = carry5;
   uint64_t t51 = t101;
-  FStar_UInt128_uint128
-  carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z6, c5), (uint32_t)56U);
+  FStar_UInt128_uint128 carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z6, c5), 56U);
   uint64_t
-  t102 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z6, c5))
-    & (uint64_t)0xffffffffffffffU;
+  t102 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z6, c5)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c6 = carry6;
   uint64_t t61 = t102;
-  FStar_UInt128_uint128
-  carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z7, c6), (uint32_t)56U);
+  FStar_UInt128_uint128 carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z7, c6), 56U);
   uint64_t
-  t103 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z7, c6))
-    & (uint64_t)0xffffffffffffffU;
+  t103 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z7, c6)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c7 = carry7;
   uint64_t t71 = t103;
-  FStar_UInt128_uint128
-  carry8 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z8, c7), (uint32_t)56U);
+  FStar_UInt128_uint128 carry8 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z8, c7), 56U);
   uint64_t
-  t104 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z8, c7))
-    & (uint64_t)0xffffffffffffffU;
+  t104 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z8, c7)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c8 = carry8;
   uint64_t t81 = t104;
   uint64_t t91 = FStar_UInt128_uint128_to_uint64(c8);
@@ -765,20 +737,20 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   uint64_t qmu7_ = t71;
   uint64_t qmu8_ = t81;
   uint64_t qmu9_ = t91;
-  uint64_t y_4 = (qmu5_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_4 = qmu4_ >> (uint32_t)40U;
+  uint64_t y_4 = (qmu5_ & 0xffffffffffULL) << 16U;
+  uint64_t x_4 = qmu4_ >> 40U;
   uint64_t z02 = x_4 | y_4;
-  uint64_t y_5 = (qmu6_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_5 = qmu5_ >> (uint32_t)40U;
+  uint64_t y_5 = (qmu6_ & 0xffffffffffULL) << 16U;
+  uint64_t x_5 = qmu5_ >> 40U;
   uint64_t z12 = x_5 | y_5;
-  uint64_t y_6 = (qmu7_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_6 = qmu6_ >> (uint32_t)40U;
+  uint64_t y_6 = (qmu7_ & 0xffffffffffULL) << 16U;
+  uint64_t x_6 = qmu6_ >> 40U;
   uint64_t z22 = x_6 | y_6;
-  uint64_t y_7 = (qmu8_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_7 = qmu7_ >> (uint32_t)40U;
+  uint64_t y_7 = (qmu8_ & 0xffffffffffULL) << 16U;
+  uint64_t x_7 = qmu7_ >> 40U;
   uint64_t z32 = x_7 | y_7;
-  uint64_t y_8 = (qmu9_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_8 = qmu8_ >> (uint32_t)40U;
+  uint64_t y_8 = (qmu9_ & 0xffffffffffULL) << 16U;
+  uint64_t x_8 = qmu8_ >> 40U;
   uint64_t z42 = x_8 | y_8;
   uint64_t qdiv0 = z02;
   uint64_t qdiv1 = z12;
@@ -789,7 +761,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   uint64_t r1 = t1;
   uint64_t r2 = t2;
   uint64_t r3 = t3;
-  uint64_t r4 = t4 & (uint64_t)0xffffffffffU;
+  uint64_t r4 = t4 & 0xffffffffffULL;
   FStar_UInt128_uint128 xy00 = FStar_UInt128_mul_wide(qdiv0, m0);
   FStar_UInt128_uint128 xy01 = FStar_UInt128_mul_wide(qdiv0, m1);
   FStar_UInt128_uint128 xy02 = FStar_UInt128_mul_wide(qdiv0, m2);
@@ -805,18 +777,18 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   FStar_UInt128_uint128 xy30 = FStar_UInt128_mul_wide(qdiv3, m0);
   FStar_UInt128_uint128 xy31 = FStar_UInt128_mul_wide(qdiv3, m1);
   FStar_UInt128_uint128 xy40 = FStar_UInt128_mul_wide(qdiv4, m0);
-  FStar_UInt128_uint128 carry9 = FStar_UInt128_shift_right(xy00, (uint32_t)56U);
-  uint64_t t105 = FStar_UInt128_uint128_to_uint64(xy00) & (uint64_t)0xffffffffffffffU;
+  FStar_UInt128_uint128 carry9 = FStar_UInt128_shift_right(xy00, 56U);
+  uint64_t t105 = FStar_UInt128_uint128_to_uint64(xy00) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c0 = carry9;
   uint64_t t010 = t105;
   FStar_UInt128_uint128
   carry10 =
     FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0),
-      (uint32_t)56U);
+      56U);
   uint64_t
   t106 =
     FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0))
-    & (uint64_t)0xffffffffffffffU;
+    & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c11 = carry10;
   uint64_t t110 = t106;
   FStar_UInt128_uint128
@@ -825,14 +797,14 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy11),
           xy20),
         c11),
-      (uint32_t)56U);
+      56U);
   uint64_t
   t107 =
     FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02,
             xy11),
           xy20),
         c11))
-    & (uint64_t)0xffffffffffffffU;
+    & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c21 = carry11;
   uint64_t t210 = t107;
   FStar_UInt128_uint128
@@ -842,7 +814,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy21),
           xy30),
         c21),
-      (uint32_t)56U);
+      56U);
   uint64_t
   t108 =
     FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03,
@@ -850,7 +822,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy21),
           xy30),
         c21))
-    & (uint64_t)0xffffffffffffffU;
+    & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c31 = carry;
   uint64_t t310 = t108;
   uint64_t
@@ -861,67 +833,67 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy31),
           xy40),
         c31))
-    & (uint64_t)0xffffffffffU;
+    & 0xffffffffffULL;
   uint64_t qmul0 = t010;
   uint64_t qmul1 = t110;
   uint64_t qmul2 = t210;
   uint64_t qmul3 = t310;
   uint64_t qmul4 = t411;
-  uint64_t b5 = (r0 - qmul0) >> (uint32_t)63U;
-  uint64_t t109 = (b5 << (uint32_t)56U) + r0 - qmul0;
+  uint64_t b5 = (r0 - qmul0) >> 63U;
+  uint64_t t109 = (b5 << 56U) + r0 - qmul0;
   uint64_t c1 = b5;
   uint64_t t011 = t109;
-  uint64_t b6 = (r1 - (qmul1 + c1)) >> (uint32_t)63U;
-  uint64_t t1010 = (b6 << (uint32_t)56U) + r1 - (qmul1 + c1);
+  uint64_t b6 = (r1 - (qmul1 + c1)) >> 63U;
+  uint64_t t1010 = (b6 << 56U) + r1 - (qmul1 + c1);
   uint64_t c2 = b6;
   uint64_t t111 = t1010;
-  uint64_t b7 = (r2 - (qmul2 + c2)) >> (uint32_t)63U;
-  uint64_t t1011 = (b7 << (uint32_t)56U) + r2 - (qmul2 + c2);
+  uint64_t b7 = (r2 - (qmul2 + c2)) >> 63U;
+  uint64_t t1011 = (b7 << 56U) + r2 - (qmul2 + c2);
   uint64_t c3 = b7;
   uint64_t t211 = t1011;
-  uint64_t b8 = (r3 - (qmul3 + c3)) >> (uint32_t)63U;
-  uint64_t t1012 = (b8 << (uint32_t)56U) + r3 - (qmul3 + c3);
+  uint64_t b8 = (r3 - (qmul3 + c3)) >> 63U;
+  uint64_t t1012 = (b8 << 56U) + r3 - (qmul3 + c3);
   uint64_t c4 = b8;
   uint64_t t311 = t1012;
-  uint64_t b9 = (r4 - (qmul4 + c4)) >> (uint32_t)63U;
-  uint64_t t1013 = (b9 << (uint32_t)40U) + r4 - (qmul4 + c4);
+  uint64_t b9 = (r4 - (qmul4 + c4)) >> 63U;
+  uint64_t t1013 = (b9 << 40U) + r4 - (qmul4 + c4);
   uint64_t t412 = t1013;
   uint64_t s0 = t011;
   uint64_t s1 = t111;
   uint64_t s2 = t211;
   uint64_t s3 = t311;
   uint64_t s4 = t412;
-  uint64_t m01 = (uint64_t)0x12631a5cf5d3edU;
-  uint64_t m11 = (uint64_t)0xf9dea2f79cd658U;
-  uint64_t m21 = (uint64_t)0x000000000014deU;
-  uint64_t m31 = (uint64_t)0x00000000000000U;
-  uint64_t m41 = (uint64_t)0x00000010000000U;
+  uint64_t m01 = 0x12631a5cf5d3edULL;
+  uint64_t m11 = 0xf9dea2f79cd658ULL;
+  uint64_t m21 = 0x000000000014deULL;
+  uint64_t m31 = 0x00000000000000ULL;
+  uint64_t m41 = 0x00000010000000ULL;
   uint64_t y0 = m01;
   uint64_t y1 = m11;
   uint64_t y2 = m21;
   uint64_t y3 = m31;
   uint64_t y4 = m41;
-  uint64_t b10 = (s0 - y0) >> (uint32_t)63U;
-  uint64_t t1014 = (b10 << (uint32_t)56U) + s0 - y0;
+  uint64_t b10 = (s0 - y0) >> 63U;
+  uint64_t t1014 = (b10 << 56U) + s0 - y0;
   uint64_t b0 = b10;
   uint64_t t01 = t1014;
-  uint64_t b11 = (s1 - (y1 + b0)) >> (uint32_t)63U;
-  uint64_t t1015 = (b11 << (uint32_t)56U) + s1 - (y1 + b0);
+  uint64_t b11 = (s1 - (y1 + b0)) >> 63U;
+  uint64_t t1015 = (b11 << 56U) + s1 - (y1 + b0);
   uint64_t b1 = b11;
   uint64_t t11 = t1015;
-  uint64_t b12 = (s2 - (y2 + b1)) >> (uint32_t)63U;
-  uint64_t t1016 = (b12 << (uint32_t)56U) + s2 - (y2 + b1);
+  uint64_t b12 = (s2 - (y2 + b1)) >> 63U;
+  uint64_t t1016 = (b12 << 56U) + s2 - (y2 + b1);
   uint64_t b2 = b12;
   uint64_t t21 = t1016;
-  uint64_t b13 = (s3 - (y3 + b2)) >> (uint32_t)63U;
-  uint64_t t1017 = (b13 << (uint32_t)56U) + s3 - (y3 + b2);
+  uint64_t b13 = (s3 - (y3 + b2)) >> 63U;
+  uint64_t t1017 = (b13 << 56U) + s3 - (y3 + b2);
   uint64_t b3 = b13;
   uint64_t t31 = t1017;
-  uint64_t b = (s4 - (y4 + b3)) >> (uint32_t)63U;
-  uint64_t t10 = (b << (uint32_t)56U) + s4 - (y4 + b3);
+  uint64_t b = (s4 - (y4 + b3)) >> 63U;
+  uint64_t t10 = (b << 56U) + s4 - (y4 + b3);
   uint64_t b4 = b;
   uint64_t t41 = t10;
-  uint64_t mask = b4 - (uint64_t)1U;
+  uint64_t mask = b4 - 1ULL;
   uint64_t z03 = s0 ^ (mask & (s0 ^ t01));
   uint64_t z13 = s1 ^ (mask & (s1 ^ t11));
   uint64_t z23 = s2 ^ (mask & (s2 ^ t21));
@@ -1008,72 +980,48 @@ static inline void mul_modq(uint64_t *out, uint64_t *x, uint64_t *y)
   FStar_UInt128_uint128 z60 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy24, xy33), xy42);
   FStar_UInt128_uint128 z70 = FStar_UInt128_add_mod(xy34, xy43);
   FStar_UInt128_uint128 z80 = xy44;
-  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z00, (uint32_t)56U);
-  uint64_t t10 = FStar_UInt128_uint128_to_uint64(z00) & (uint64_t)0xffffffffffffffU;
+  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z00, 56U);
+  uint64_t t10 = FStar_UInt128_uint128_to_uint64(z00) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c0 = carry0;
   uint64_t t0 = t10;
-  FStar_UInt128_uint128
-  carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z10, c0), (uint32_t)56U);
+  FStar_UInt128_uint128 carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z10, c0), 56U);
   uint64_t
-  t11 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z10, c0))
-    & (uint64_t)0xffffffffffffffU;
+  t11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z10, c0)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c1 = carry1;
   uint64_t t1 = t11;
-  FStar_UInt128_uint128
-  carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z20, c1), (uint32_t)56U);
+  FStar_UInt128_uint128 carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z20, c1), 56U);
   uint64_t
-  t12 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z20, c1))
-    & (uint64_t)0xffffffffffffffU;
+  t12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z20, c1)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c2 = carry2;
   uint64_t t2 = t12;
-  FStar_UInt128_uint128
-  carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z30, c2), (uint32_t)56U);
+  FStar_UInt128_uint128 carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z30, c2), 56U);
   uint64_t
-  t13 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z30, c2))
-    & (uint64_t)0xffffffffffffffU;
+  t13 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z30, c2)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c3 = carry3;
   uint64_t t3 = t13;
-  FStar_UInt128_uint128
-  carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z40, c3), (uint32_t)56U);
+  FStar_UInt128_uint128 carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z40, c3), 56U);
   uint64_t
-  t14 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z40, c3))
-    & (uint64_t)0xffffffffffffffU;
+  t14 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z40, c3)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c4 = carry4;
   uint64_t t4 = t14;
-  FStar_UInt128_uint128
-  carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z50, c4), (uint32_t)56U);
+  FStar_UInt128_uint128 carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z50, c4), 56U);
   uint64_t
-  t15 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z50, c4))
-    & (uint64_t)0xffffffffffffffU;
+  t15 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z50, c4)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c5 = carry5;
   uint64_t t5 = t15;
-  FStar_UInt128_uint128
-  carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z60, c5), (uint32_t)56U);
+  FStar_UInt128_uint128 carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z60, c5), 56U);
   uint64_t
-  t16 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z60, c5))
-    & (uint64_t)0xffffffffffffffU;
+  t16 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z60, c5)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c6 = carry6;
   uint64_t t6 = t16;
-  FStar_UInt128_uint128
-  carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z70, c6), (uint32_t)56U);
+  FStar_UInt128_uint128 carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z70, c6), 56U);
   uint64_t
-  t17 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z70, c6))
-    & (uint64_t)0xffffffffffffffU;
+  t17 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z70, c6)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c7 = carry7;
   uint64_t t7 = t17;
-  FStar_UInt128_uint128
-  carry = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z80, c7), (uint32_t)56U);
+  FStar_UInt128_uint128 carry = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z80, c7), 56U);
   uint64_t
-  t =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z80, c7))
-    & (uint64_t)0xffffffffffffffU;
+  t = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z80, c7)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c8 = carry;
   uint64_t t8 = t;
   uint64_t t9 = FStar_UInt128_uint128_to_uint64(c8);
@@ -1112,54 +1060,54 @@ static inline void add_modq(uint64_t *out, uint64_t *x, uint64_t *y)
   uint64_t y2 = y[2U];
   uint64_t y3 = y[3U];
   uint64_t y4 = y[4U];
-  uint64_t carry0 = (x0 + y0) >> (uint32_t)56U;
-  uint64_t t0 = (x0 + y0) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry0 = (x0 + y0) >> 56U;
+  uint64_t t0 = (x0 + y0) & 0xffffffffffffffULL;
   uint64_t t00 = t0;
   uint64_t c0 = carry0;
-  uint64_t carry1 = (x1 + y1 + c0) >> (uint32_t)56U;
-  uint64_t t1 = (x1 + y1 + c0) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry1 = (x1 + y1 + c0) >> 56U;
+  uint64_t t1 = (x1 + y1 + c0) & 0xffffffffffffffULL;
   uint64_t t10 = t1;
   uint64_t c1 = carry1;
-  uint64_t carry2 = (x2 + y2 + c1) >> (uint32_t)56U;
-  uint64_t t2 = (x2 + y2 + c1) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry2 = (x2 + y2 + c1) >> 56U;
+  uint64_t t2 = (x2 + y2 + c1) & 0xffffffffffffffULL;
   uint64_t t20 = t2;
   uint64_t c2 = carry2;
-  uint64_t carry = (x3 + y3 + c2) >> (uint32_t)56U;
-  uint64_t t3 = (x3 + y3 + c2) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry = (x3 + y3 + c2) >> 56U;
+  uint64_t t3 = (x3 + y3 + c2) & 0xffffffffffffffULL;
   uint64_t t30 = t3;
   uint64_t c3 = carry;
   uint64_t t4 = x4 + y4 + c3;
-  uint64_t m0 = (uint64_t)0x12631a5cf5d3edU;
-  uint64_t m1 = (uint64_t)0xf9dea2f79cd658U;
-  uint64_t m2 = (uint64_t)0x000000000014deU;
-  uint64_t m3 = (uint64_t)0x00000000000000U;
-  uint64_t m4 = (uint64_t)0x00000010000000U;
+  uint64_t m0 = 0x12631a5cf5d3edULL;
+  uint64_t m1 = 0xf9dea2f79cd658ULL;
+  uint64_t m2 = 0x000000000014deULL;
+  uint64_t m3 = 0x00000000000000ULL;
+  uint64_t m4 = 0x00000010000000ULL;
   uint64_t y01 = m0;
   uint64_t y11 = m1;
   uint64_t y21 = m2;
   uint64_t y31 = m3;
   uint64_t y41 = m4;
-  uint64_t b5 = (t00 - y01) >> (uint32_t)63U;
-  uint64_t t5 = (b5 << (uint32_t)56U) + t00 - y01;
+  uint64_t b5 = (t00 - y01) >> 63U;
+  uint64_t t5 = (b5 << 56U) + t00 - y01;
   uint64_t b0 = b5;
   uint64_t t01 = t5;
-  uint64_t b6 = (t10 - (y11 + b0)) >> (uint32_t)63U;
-  uint64_t t6 = (b6 << (uint32_t)56U) + t10 - (y11 + b0);
+  uint64_t b6 = (t10 - (y11 + b0)) >> 63U;
+  uint64_t t6 = (b6 << 56U) + t10 - (y11 + b0);
   uint64_t b1 = b6;
   uint64_t t11 = t6;
-  uint64_t b7 = (t20 - (y21 + b1)) >> (uint32_t)63U;
-  uint64_t t7 = (b7 << (uint32_t)56U) + t20 - (y21 + b1);
+  uint64_t b7 = (t20 - (y21 + b1)) >> 63U;
+  uint64_t t7 = (b7 << 56U) + t20 - (y21 + b1);
   uint64_t b2 = b7;
   uint64_t t21 = t7;
-  uint64_t b8 = (t30 - (y31 + b2)) >> (uint32_t)63U;
-  uint64_t t8 = (b8 << (uint32_t)56U) + t30 - (y31 + b2);
+  uint64_t b8 = (t30 - (y31 + b2)) >> 63U;
+  uint64_t t8 = (b8 << 56U) + t30 - (y31 + b2);
   uint64_t b3 = b8;
   uint64_t t31 = t8;
-  uint64_t b = (t4 - (y41 + b3)) >> (uint32_t)63U;
-  uint64_t t = (b << (uint32_t)56U) + t4 - (y41 + b3);
+  uint64_t b = (t4 - (y41 + b3)) >> 63U;
+  uint64_t t = (b << 56U) + t4 - (y41 + b3);
   uint64_t b4 = b;
   uint64_t t41 = t;
-  uint64_t mask = b4 - (uint64_t)1U;
+  uint64_t mask = b4 - 1ULL;
   uint64_t z00 = t00 ^ (mask & (t00 ^ t01));
   uint64_t z10 = t10 ^ (mask & (t10 ^ t11));
   uint64_t z20 = t20 ^ (mask & (t20 ^ t21));
@@ -1194,35 +1142,35 @@ static inline bool gte_q(uint64_t *s)
   uint64_t s2 = s[2U];
   uint64_t s3 = s[3U];
   uint64_t s4 = s[4U];
-  if (s4 > (uint64_t)0x00000010000000U)
+  if (s4 > 0x00000010000000ULL)
   {
     return true;
   }
-  if (s4 < (uint64_t)0x00000010000000U)
+  if (s4 < 0x00000010000000ULL)
   {
     return false;
   }
-  if (s3 > (uint64_t)0x00000000000000U)
+  if (s3 > 0x00000000000000ULL)
   {
     return true;
   }
-  if (s2 > (uint64_t)0x000000000014deU)
+  if (s2 > 0x000000000014deULL)
   {
     return true;
   }
-  if (s2 < (uint64_t)0x000000000014deU)
+  if (s2 < 0x000000000014deULL)
   {
     return false;
   }
-  if (s1 > (uint64_t)0xf9dea2f79cd658U)
+  if (s1 > 0xf9dea2f79cd658ULL)
   {
     return true;
   }
-  if (s1 < (uint64_t)0xf9dea2f79cd658U)
+  if (s1 < 0xf9dea2f79cd658ULL)
   {
     return false;
   }
-  if (s0 >= (uint64_t)0x12631a5cf5d3edU)
+  if (s0 >= 0x12631a5cf5d3edULL)
   {
     return true;
   }
@@ -1248,19 +1196,19 @@ bool Hacl_Impl_Ed25519_PointEqual_point_equal(uint64_t *p, uint64_t *q)
 {
   uint64_t tmp[20U] = { 0U };
   uint64_t *pxqz = tmp;
-  uint64_t *qxpz = tmp + (uint32_t)5U;
-  fmul0(pxqz, p, q + (uint32_t)10U);
+  uint64_t *qxpz = tmp + 5U;
+  fmul0(pxqz, p, q + 10U);
   reduce(pxqz);
-  fmul0(qxpz, q, p + (uint32_t)10U);
+  fmul0(qxpz, q, p + 10U);
   reduce(qxpz);
   bool b = eq(pxqz, qxpz);
   if (b)
   {
-    uint64_t *pyqz = tmp + (uint32_t)10U;
-    uint64_t *qypz = tmp + (uint32_t)15U;
-    fmul0(pyqz, p + (uint32_t)5U, q + (uint32_t)10U);
+    uint64_t *pyqz = tmp + 10U;
+    uint64_t *qypz = tmp + 15U;
+    fmul0(pyqz, p + 5U, q + 10U);
     reduce(pyqz);
-    fmul0(qypz, q + (uint32_t)5U, p + (uint32_t)10U);
+    fmul0(qypz, q + 5U, p + 10U);
     reduce(qypz);
     return eq(pyqz, qypz);
   }
@@ -1270,23 +1218,23 @@ bool Hacl_Impl_Ed25519_PointEqual_point_equal(uint64_t *p, uint64_t *q)
 void Hacl_Impl_Ed25519_PointNegate_point_negate(uint64_t *p, uint64_t *out)
 {
   uint64_t zero[5U] = { 0U };
-  zero[0U] = (uint64_t)0U;
-  zero[1U] = (uint64_t)0U;
-  zero[2U] = (uint64_t)0U;
-  zero[3U] = (uint64_t)0U;
-  zero[4U] = (uint64_t)0U;
+  zero[0U] = 0ULL;
+  zero[1U] = 0ULL;
+  zero[2U] = 0ULL;
+  zero[3U] = 0ULL;
+  zero[4U] = 0ULL;
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)5U;
-  uint64_t *z = p + (uint32_t)10U;
-  uint64_t *t = p + (uint32_t)15U;
+  uint64_t *y = p + 5U;
+  uint64_t *z = p + 10U;
+  uint64_t *t = p + 15U;
   uint64_t *x1 = out;
-  uint64_t *y1 = out + (uint32_t)5U;
-  uint64_t *z1 = out + (uint32_t)10U;
-  uint64_t *t1 = out + (uint32_t)15U;
+  uint64_t *y1 = out + 5U;
+  uint64_t *z1 = out + 10U;
+  uint64_t *t1 = out + 15U;
   fdifference(x1, zero, x);
   Hacl_Bignum25519_reduce_513(x1);
-  memcpy(y1, y, (uint32_t)5U * sizeof (uint64_t));
-  memcpy(z1, z, (uint32_t)5U * sizeof (uint64_t));
+  memcpy(y1, y, 5U * sizeof (uint64_t));
+  memcpy(z1, z, 5U * sizeof (uint64_t));
   fdifference(t1, zero, t);
   Hacl_Bignum25519_reduce_513(t1);
 }
@@ -1295,11 +1243,11 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t
 {
   uint64_t bscalar[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar;
-    uint8_t *bj = scalar + i * (uint32_t)8U;
+    uint8_t *bj = scalar + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -1307,42 +1255,34 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t
   uint64_t table[320U] = { 0U };
   uint64_t tmp[20U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)20U;
+  uint64_t *t1 = table + 20U;
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(t0);
-  memcpy(t1, q, (uint32_t)20U * sizeof (uint64_t));
+  memcpy(t1, q, 20U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)20U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 20U;
     Hacl_Impl_Ed25519_PointDouble_point_double(tmp, t11);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U,
-      tmp,
-      (uint32_t)20U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U;
+    memcpy(table + (2U * i + 2U) * 20U, tmp, 20U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 20U;
     Hacl_Impl_Ed25519_PointAdd_point_add(tmp, q, t2);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)20U,
-      tmp,
-      (uint32_t)20U * sizeof (uint64_t)););
+    memcpy(table + (2U * i + 3U) * 20U, tmp, 20U * sizeof (uint64_t)););
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(out);
   uint64_t tmp0[20U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)20U * sizeof (uint64_t));
+    KRML_MAYBE_FOR4(i, 0U, 4U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
+    uint32_t k = 256U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 20U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)20U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)20U; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 20U;
+      for (uint32_t i = 0U; i < 20U; i++)
       {
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -1354,14 +1294,14 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t
 
 static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)
 {
-  memcpy(tmp, (uint64_t *)table, (uint32_t)20U * sizeof (uint64_t));
+  memcpy(tmp, (uint64_t *)table, 20U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i0,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));
-    const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)20U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)20U; i++)
+    0U,
+    15U,
+    1U,
+    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + 1U));
+    const uint64_t *res_j = table + (i0 + 1U) * 20U;
+    for (uint32_t i = 0U; i < 20U; i++)
     {
       uint64_t *os = tmp;
       uint64_t x = (c & res_j[i]) | (~c & tmp[i]);
@@ -1373,107 +1313,97 @@ static inline void point_mul_g(uint64_t *out, uint8_t *scalar)
 {
   uint64_t bscalar[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar;
-    uint8_t *bj = scalar + i * (uint32_t)8U;
+    uint8_t *bj = scalar + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t q1[20U] = { 0U };
   uint64_t *gx = q1;
-  uint64_t *gy = q1 + (uint32_t)5U;
-  uint64_t *gz = q1 + (uint32_t)10U;
-  uint64_t *gt = q1 + (uint32_t)15U;
-  gx[0U] = (uint64_t)0x00062d608f25d51aU;
-  gx[1U] = (uint64_t)0x000412a4b4f6592aU;
-  gx[2U] = (uint64_t)0x00075b7171a4b31dU;
-  gx[3U] = (uint64_t)0x0001ff60527118feU;
-  gx[4U] = (uint64_t)0x000216936d3cd6e5U;
-  gy[0U] = (uint64_t)0x0006666666666658U;
-  gy[1U] = (uint64_t)0x0004ccccccccccccU;
-  gy[2U] = (uint64_t)0x0001999999999999U;
-  gy[3U] = (uint64_t)0x0003333333333333U;
-  gy[4U] = (uint64_t)0x0006666666666666U;
-  gz[0U] = (uint64_t)1U;
-  gz[1U] = (uint64_t)0U;
-  gz[2U] = (uint64_t)0U;
-  gz[3U] = (uint64_t)0U;
-  gz[4U] = (uint64_t)0U;
-  gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
-  gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
-  gt[2U] = (uint64_t)0x0002af8df483c27eU;
-  gt[3U] = (uint64_t)0x000332b375274732U;
-  gt[4U] = (uint64_t)0x00067875f0fd78b7U;
+  uint64_t *gy = q1 + 5U;
+  uint64_t *gz = q1 + 10U;
+  uint64_t *gt = q1 + 15U;
+  gx[0U] = 0x00062d608f25d51aULL;
+  gx[1U] = 0x000412a4b4f6592aULL;
+  gx[2U] = 0x00075b7171a4b31dULL;
+  gx[3U] = 0x0001ff60527118feULL;
+  gx[4U] = 0x000216936d3cd6e5ULL;
+  gy[0U] = 0x0006666666666658ULL;
+  gy[1U] = 0x0004ccccccccccccULL;
+  gy[2U] = 0x0001999999999999ULL;
+  gy[3U] = 0x0003333333333333ULL;
+  gy[4U] = 0x0006666666666666ULL;
+  gz[0U] = 1ULL;
+  gz[1U] = 0ULL;
+  gz[2U] = 0ULL;
+  gz[3U] = 0ULL;
+  gz[4U] = 0ULL;
+  gt[0U] = 0x00068ab3a5b7dda3ULL;
+  gt[1U] = 0x00000eea2a5eadbbULL;
+  gt[2U] = 0x0002af8df483c27eULL;
+  gt[3U] = 0x000332b375274732ULL;
+  gt[4U] = 0x00067875f0fd78b7ULL;
   uint64_t
   q2[20U] =
     {
-      (uint64_t)13559344787725U, (uint64_t)2051621493703448U, (uint64_t)1947659315640708U,
-      (uint64_t)626856790370168U, (uint64_t)1592804284034836U, (uint64_t)1781728767459187U,
-      (uint64_t)278818420518009U, (uint64_t)2038030359908351U, (uint64_t)910625973862690U,
-      (uint64_t)471887343142239U, (uint64_t)1298543306606048U, (uint64_t)794147365642417U,
-      (uint64_t)129968992326749U, (uint64_t)523140861678572U, (uint64_t)1166419653909231U,
-      (uint64_t)2009637196928390U, (uint64_t)1288020222395193U, (uint64_t)1007046974985829U,
-      (uint64_t)208981102651386U, (uint64_t)2074009315253380U
+      13559344787725ULL, 2051621493703448ULL, 1947659315640708ULL, 626856790370168ULL,
+      1592804284034836ULL, 1781728767459187ULL, 278818420518009ULL, 2038030359908351ULL,
+      910625973862690ULL, 471887343142239ULL, 1298543306606048ULL, 794147365642417ULL,
+      129968992326749ULL, 523140861678572ULL, 1166419653909231ULL, 2009637196928390ULL,
+      1288020222395193ULL, 1007046974985829ULL, 208981102651386ULL, 2074009315253380ULL
     };
   uint64_t
   q3[20U] =
     {
-      (uint64_t)557549315715710U, (uint64_t)196756086293855U, (uint64_t)846062225082495U,
-      (uint64_t)1865068224838092U, (uint64_t)991112090754908U, (uint64_t)522916421512828U,
-      (uint64_t)2098523346722375U, (uint64_t)1135633221747012U, (uint64_t)858420432114866U,
-      (uint64_t)186358544306082U, (uint64_t)1044420411868480U, (uint64_t)2080052304349321U,
-      (uint64_t)557301814716724U, (uint64_t)1305130257814057U, (uint64_t)2126012765451197U,
-      (uint64_t)1441004402875101U, (uint64_t)353948968859203U, (uint64_t)470765987164835U,
-      (uint64_t)1507675957683570U, (uint64_t)1086650358745097U
+      557549315715710ULL, 196756086293855ULL, 846062225082495ULL, 1865068224838092ULL,
+      991112090754908ULL, 522916421512828ULL, 2098523346722375ULL, 1135633221747012ULL,
+      858420432114866ULL, 186358544306082ULL, 1044420411868480ULL, 2080052304349321ULL,
+      557301814716724ULL, 1305130257814057ULL, 2126012765451197ULL, 1441004402875101ULL,
+      353948968859203ULL, 470765987164835ULL, 1507675957683570ULL, 1086650358745097ULL
     };
   uint64_t
   q4[20U] =
     {
-      (uint64_t)1129953239743101U, (uint64_t)1240339163956160U, (uint64_t)61002583352401U,
-      (uint64_t)2017604552196030U, (uint64_t)1576867829229863U, (uint64_t)1508654942849389U,
-      (uint64_t)270111619664077U, (uint64_t)1253097517254054U, (uint64_t)721798270973250U,
-      (uint64_t)161923365415298U, (uint64_t)828530877526011U, (uint64_t)1494851059386763U,
-      (uint64_t)662034171193976U, (uint64_t)1315349646974670U, (uint64_t)2199229517308806U,
-      (uint64_t)497078277852673U, (uint64_t)1310507715989956U, (uint64_t)1881315714002105U,
-      (uint64_t)2214039404983803U, (uint64_t)1331036420272667U
+      1129953239743101ULL, 1240339163956160ULL, 61002583352401ULL, 2017604552196030ULL,
+      1576867829229863ULL, 1508654942849389ULL, 270111619664077ULL, 1253097517254054ULL,
+      721798270973250ULL, 161923365415298ULL, 828530877526011ULL, 1494851059386763ULL,
+      662034171193976ULL, 1315349646974670ULL, 2199229517308806ULL, 497078277852673ULL,
+      1310507715989956ULL, 1881315714002105ULL, 2214039404983803ULL, 1331036420272667ULL
     };
   uint64_t *r1 = bscalar;
-  uint64_t *r2 = bscalar + (uint32_t)1U;
-  uint64_t *r3 = bscalar + (uint32_t)2U;
-  uint64_t *r4 = bscalar + (uint32_t)3U;
+  uint64_t *r2 = bscalar + 1U;
+  uint64_t *r3 = bscalar + 2U;
+  uint64_t *r4 = bscalar + 3U;
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(out);
   uint64_t tmp[20U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);
+    0U,
+    16U,
+    1U,
+    KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
+    uint32_t k = 64U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
-    uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);
+    uint32_t k0 = 64U - 4U * i - 4U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
-    uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);
+    uint32_t k1 = 64U - 4U * i - 4U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
-    uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);
+    uint32_t k2 = 64U - 4U * i - 4U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp););
-  KRML_HOST_IGNORE(q2);
-  KRML_HOST_IGNORE(q3);
-  KRML_HOST_IGNORE(q4);
+  KRML_MAYBE_UNUSED_VAR(q2);
+  KRML_MAYBE_UNUSED_VAR(q3);
+  KRML_MAYBE_UNUSED_VAR(q4);
 }
 
 static inline void
@@ -1481,48 +1411,48 @@ point_mul_g_double_vartime(uint64_t *out, uint8_t *scalar1, uint8_t *scalar2, ui
 {
   uint64_t tmp[28U] = { 0U };
   uint64_t *g = tmp;
-  uint64_t *bscalar1 = tmp + (uint32_t)20U;
-  uint64_t *bscalar2 = tmp + (uint32_t)24U;
+  uint64_t *bscalar1 = tmp + 20U;
+  uint64_t *bscalar2 = tmp + 24U;
   uint64_t *gx = g;
-  uint64_t *gy = g + (uint32_t)5U;
-  uint64_t *gz = g + (uint32_t)10U;
-  uint64_t *gt = g + (uint32_t)15U;
-  gx[0U] = (uint64_t)0x00062d608f25d51aU;
-  gx[1U] = (uint64_t)0x000412a4b4f6592aU;
-  gx[2U] = (uint64_t)0x00075b7171a4b31dU;
-  gx[3U] = (uint64_t)0x0001ff60527118feU;
-  gx[4U] = (uint64_t)0x000216936d3cd6e5U;
-  gy[0U] = (uint64_t)0x0006666666666658U;
-  gy[1U] = (uint64_t)0x0004ccccccccccccU;
-  gy[2U] = (uint64_t)0x0001999999999999U;
-  gy[3U] = (uint64_t)0x0003333333333333U;
-  gy[4U] = (uint64_t)0x0006666666666666U;
-  gz[0U] = (uint64_t)1U;
-  gz[1U] = (uint64_t)0U;
-  gz[2U] = (uint64_t)0U;
-  gz[3U] = (uint64_t)0U;
-  gz[4U] = (uint64_t)0U;
-  gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
-  gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
-  gt[2U] = (uint64_t)0x0002af8df483c27eU;
-  gt[3U] = (uint64_t)0x000332b375274732U;
-  gt[4U] = (uint64_t)0x00067875f0fd78b7U;
+  uint64_t *gy = g + 5U;
+  uint64_t *gz = g + 10U;
+  uint64_t *gt = g + 15U;
+  gx[0U] = 0x00062d608f25d51aULL;
+  gx[1U] = 0x000412a4b4f6592aULL;
+  gx[2U] = 0x00075b7171a4b31dULL;
+  gx[3U] = 0x0001ff60527118feULL;
+  gx[4U] = 0x000216936d3cd6e5ULL;
+  gy[0U] = 0x0006666666666658ULL;
+  gy[1U] = 0x0004ccccccccccccULL;
+  gy[2U] = 0x0001999999999999ULL;
+  gy[3U] = 0x0003333333333333ULL;
+  gy[4U] = 0x0006666666666666ULL;
+  gz[0U] = 1ULL;
+  gz[1U] = 0ULL;
+  gz[2U] = 0ULL;
+  gz[3U] = 0ULL;
+  gz[4U] = 0ULL;
+  gt[0U] = 0x00068ab3a5b7dda3ULL;
+  gt[1U] = 0x00000eea2a5eadbbULL;
+  gt[2U] = 0x0002af8df483c27eULL;
+  gt[3U] = 0x000332b375274732ULL;
+  gt[4U] = 0x00067875f0fd78b7ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar1;
-    uint8_t *bj = scalar1 + i * (uint32_t)8U;
+    uint8_t *bj = scalar1 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar2;
-    uint8_t *bj = scalar2 + i * (uint32_t)8U;
+    uint8_t *bj = scalar2 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -1530,58 +1460,50 @@ point_mul_g_double_vartime(uint64_t *out, uint8_t *scalar1, uint8_t *scalar2, ui
   uint64_t table2[640U] = { 0U };
   uint64_t tmp1[20U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)20U;
+  uint64_t *t1 = table2 + 20U;
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(t0);
-  memcpy(t1, q2, (uint32_t)20U * sizeof (uint64_t));
+  memcpy(t1, q2, 20U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)20U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 20U;
     Hacl_Impl_Ed25519_PointDouble_point_double(tmp1, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U,
-      tmp1,
-      (uint32_t)20U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U;
+    memcpy(table2 + (2U * i + 2U) * 20U, tmp1, 20U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 20U;
     Hacl_Impl_Ed25519_PointAdd_point_add(tmp1, q2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)20U,
-      tmp1,
-      (uint32_t)20U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 20U, tmp1, 20U * sizeof (uint64_t)););
   uint64_t tmp10[20U] = { 0U };
-  uint32_t i0 = (uint32_t)255U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar1, i0, (uint32_t)5U);
+  uint32_t i0 = 255U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
   const
   uint64_t
-  *a_bits_l = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)20U;
-  memcpy(out, (uint64_t *)a_bits_l, (uint32_t)20U * sizeof (uint64_t));
-  uint32_t i1 = (uint32_t)255U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar2, i1, (uint32_t)5U);
+  *a_bits_l = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 20U;
+  memcpy(out, (uint64_t *)a_bits_l, 20U * sizeof (uint64_t));
+  uint32_t i1 = 255U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
-  const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)20U;
-  memcpy(tmp10, (uint64_t *)a_bits_l0, (uint32_t)20U * sizeof (uint64_t));
+  const uint64_t *a_bits_l0 = table2 + bits_l320 * 20U;
+  memcpy(tmp10, (uint64_t *)a_bits_l0, 20U * sizeof (uint64_t));
   Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp10);
   uint64_t tmp11[20U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++)
+  for (uint32_t i = 0U; i < 51U; i++)
   {
-    KRML_MAYBE_FOR5(i2,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar2, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
+    uint32_t k = 255U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar2, k, 5U);
     uint32_t bits_l321 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)20U;
-    memcpy(tmp11, (uint64_t *)a_bits_l1, (uint32_t)20U * sizeof (uint64_t));
+    const uint64_t *a_bits_l1 = table2 + bits_l321 * 20U;
+    memcpy(tmp11, (uint64_t *)a_bits_l1, 20U * sizeof (uint64_t));
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp11);
-    uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar1, k0, (uint32_t)5U);
+    uint32_t k0 = 255U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar1, k0, 5U);
     uint32_t bits_l322 = (uint32_t)bits_l0;
     const
     uint64_t
-    *a_bits_l2 = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)20U;
-    memcpy(tmp11, (uint64_t *)a_bits_l2, (uint32_t)20U * sizeof (uint64_t));
+    *a_bits_l2 = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 20U;
+    memcpy(tmp11, (uint64_t *)a_bits_l2, 20U * sizeof (uint64_t));
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp11);
   }
 }
@@ -1609,13 +1531,13 @@ static inline void store_56(uint8_t *out, uint64_t *b)
   uint32_t b4_ = (uint32_t)b4;
   uint8_t *b8 = out;
   store64_le(b8, b0);
-  uint8_t *b80 = out + (uint32_t)7U;
+  uint8_t *b80 = out + 7U;
   store64_le(b80, b1);
-  uint8_t *b81 = out + (uint32_t)14U;
+  uint8_t *b81 = out + 14U;
   store64_le(b81, b2);
-  uint8_t *b82 = out + (uint32_t)21U;
+  uint8_t *b82 = out + 21U;
   store64_le(b82, b3);
-  store32_le(out + (uint32_t)28U, b4_);
+  store32_le(out + 28U, b4_);
 }
 
 static inline void load_64_bytes(uint64_t *out, uint8_t *b)
@@ -1623,39 +1545,39 @@ static inline void load_64_bytes(uint64_t *out, uint8_t *b)
   uint8_t *b80 = b;
   uint64_t u = load64_le(b80);
   uint64_t z = u;
-  uint64_t b0 = z & (uint64_t)0xffffffffffffffU;
-  uint8_t *b81 = b + (uint32_t)7U;
+  uint64_t b0 = z & 0xffffffffffffffULL;
+  uint8_t *b81 = b + 7U;
   uint64_t u0 = load64_le(b81);
   uint64_t z0 = u0;
-  uint64_t b1 = z0 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b82 = b + (uint32_t)14U;
+  uint64_t b1 = z0 & 0xffffffffffffffULL;
+  uint8_t *b82 = b + 14U;
   uint64_t u1 = load64_le(b82);
   uint64_t z1 = u1;
-  uint64_t b2 = z1 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b83 = b + (uint32_t)21U;
+  uint64_t b2 = z1 & 0xffffffffffffffULL;
+  uint8_t *b83 = b + 21U;
   uint64_t u2 = load64_le(b83);
   uint64_t z2 = u2;
-  uint64_t b3 = z2 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b84 = b + (uint32_t)28U;
+  uint64_t b3 = z2 & 0xffffffffffffffULL;
+  uint8_t *b84 = b + 28U;
   uint64_t u3 = load64_le(b84);
   uint64_t z3 = u3;
-  uint64_t b4 = z3 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b85 = b + (uint32_t)35U;
+  uint64_t b4 = z3 & 0xffffffffffffffULL;
+  uint8_t *b85 = b + 35U;
   uint64_t u4 = load64_le(b85);
   uint64_t z4 = u4;
-  uint64_t b5 = z4 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b86 = b + (uint32_t)42U;
+  uint64_t b5 = z4 & 0xffffffffffffffULL;
+  uint8_t *b86 = b + 42U;
   uint64_t u5 = load64_le(b86);
   uint64_t z5 = u5;
-  uint64_t b6 = z5 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b87 = b + (uint32_t)49U;
+  uint64_t b6 = z5 & 0xffffffffffffffULL;
+  uint8_t *b87 = b + 49U;
   uint64_t u6 = load64_le(b87);
   uint64_t z6 = u6;
-  uint64_t b7 = z6 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b8 = b + (uint32_t)56U;
+  uint64_t b7 = z6 & 0xffffffffffffffULL;
+  uint8_t *b8 = b + 56U;
   uint64_t u7 = load64_le(b8);
   uint64_t z7 = u7;
-  uint64_t b88 = z7 & (uint64_t)0xffffffffffffffU;
+  uint64_t b88 = z7 & 0xffffffffffffffULL;
   uint8_t b63 = b[63U];
   uint64_t b9 = (uint64_t)b63;
   out[0U] = b0;
@@ -1675,20 +1597,20 @@ static inline void load_32_bytes(uint64_t *out, uint8_t *b)
   uint8_t *b80 = b;
   uint64_t u0 = load64_le(b80);
   uint64_t z = u0;
-  uint64_t b0 = z & (uint64_t)0xffffffffffffffU;
-  uint8_t *b81 = b + (uint32_t)7U;
+  uint64_t b0 = z & 0xffffffffffffffULL;
+  uint8_t *b81 = b + 7U;
   uint64_t u1 = load64_le(b81);
   uint64_t z0 = u1;
-  uint64_t b1 = z0 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b82 = b + (uint32_t)14U;
+  uint64_t b1 = z0 & 0xffffffffffffffULL;
+  uint8_t *b82 = b + 14U;
   uint64_t u2 = load64_le(b82);
   uint64_t z1 = u2;
-  uint64_t b2 = z1 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b8 = b + (uint32_t)21U;
+  uint64_t b2 = z1 & 0xffffffffffffffULL;
+  uint8_t *b8 = b + 21U;
   uint64_t u3 = load64_le(b8);
   uint64_t z2 = u3;
-  uint64_t b3 = z2 & (uint64_t)0xffffffffffffffU;
-  uint32_t u = load32_le(b + (uint32_t)28U);
+  uint64_t b3 = z2 & 0xffffffffffffffULL;
+  uint32_t u = load32_le(b + 28U);
   uint32_t b4 = u;
   uint64_t b41 = (uint64_t)b4;
   out[0U] = b0;
@@ -1703,16 +1625,15 @@ static inline void sha512_pre_msg(uint8_t *hash, uint8_t *prefix, uint32_t len,
   uint8_t buf[128U] = { 0U };
   uint64_t block_state[8U] = { 0U };
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64 p = s;
-  Hacl_SHA2_Scalar32_sha512_init(block_state);
+  Hacl_Hash_SHA2_sha512_init(block_state);
   Hacl_Streaming_MD_state_64 *st = &p;
-  Hacl_Streaming_Types_error_code
-  err0 = Hacl_Streaming_SHA2_update_512(st, prefix, (uint32_t)32U);
-  Hacl_Streaming_Types_error_code err1 = Hacl_Streaming_SHA2_update_512(st, input, len);
-  KRML_HOST_IGNORE(err0);
-  KRML_HOST_IGNORE(err1);
-  Hacl_Streaming_SHA2_finish_512(st, hash);
+  Hacl_Streaming_Types_error_code err0 = Hacl_Hash_SHA2_update_512(st, prefix, 32U);
+  Hacl_Streaming_Types_error_code err1 = Hacl_Hash_SHA2_update_512(st, input, len);
+  KRML_MAYBE_UNUSED_VAR(err0);
+  KRML_MAYBE_UNUSED_VAR(err1);
+  Hacl_Hash_SHA2_digest_512(st, hash);
 }
 
 static inline void
@@ -1727,19 +1648,17 @@ sha512_pre_pre2_msg(
   uint8_t buf[128U] = { 0U };
   uint64_t block_state[8U] = { 0U };
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64 p = s;
-  Hacl_SHA2_Scalar32_sha512_init(block_state);
+  Hacl_Hash_SHA2_sha512_init(block_state);
   Hacl_Streaming_MD_state_64 *st = &p;
-  Hacl_Streaming_Types_error_code
-  err0 = Hacl_Streaming_SHA2_update_512(st, prefix, (uint32_t)32U);
-  Hacl_Streaming_Types_error_code
-  err1 = Hacl_Streaming_SHA2_update_512(st, prefix2, (uint32_t)32U);
-  Hacl_Streaming_Types_error_code err2 = Hacl_Streaming_SHA2_update_512(st, input, len);
-  KRML_HOST_IGNORE(err0);
-  KRML_HOST_IGNORE(err1);
-  KRML_HOST_IGNORE(err2);
-  Hacl_Streaming_SHA2_finish_512(st, hash);
+  Hacl_Streaming_Types_error_code err0 = Hacl_Hash_SHA2_update_512(st, prefix, 32U);
+  Hacl_Streaming_Types_error_code err1 = Hacl_Hash_SHA2_update_512(st, prefix2, 32U);
+  Hacl_Streaming_Types_error_code err2 = Hacl_Hash_SHA2_update_512(st, input, len);
+  KRML_MAYBE_UNUSED_VAR(err0);
+  KRML_MAYBE_UNUSED_VAR(err1);
+  KRML_MAYBE_UNUSED_VAR(err2);
+  Hacl_Hash_SHA2_digest_512(st, hash);
 }
 
 static inline void
@@ -1777,12 +1696,12 @@ static inline void point_mul_g_compress(uint8_t *out, uint8_t *s)
 
 static inline void secret_expand(uint8_t *expanded, uint8_t *secret)
 {
-  Hacl_Streaming_SHA2_hash_512(secret, (uint32_t)32U, expanded);
+  Hacl_Hash_SHA2_hash_512(expanded, secret, 32U);
   uint8_t *h_low = expanded;
   uint8_t h_low0 = h_low[0U];
   uint8_t h_low31 = h_low[31U];
-  h_low[0U] = h_low0 & (uint8_t)0xf8U;
-  h_low[31U] = (h_low31 & (uint8_t)127U) | (uint8_t)64U;
+  h_low[0U] = (uint32_t)h_low0 & 0xf8U;
+  h_low[31U] = ((uint32_t)h_low31 & 127U) | 64U;
 }
 
 /********************************************************************************
@@ -1816,8 +1735,8 @@ Compute the expanded keys for an Ed25519 signature.
 void Hacl_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key)
 {
   uint8_t *public_key = expanded_keys;
-  uint8_t *s_prefix = expanded_keys + (uint32_t)32U;
-  uint8_t *s = expanded_keys + (uint32_t)32U;
+  uint8_t *s_prefix = expanded_keys + 32U;
+  uint8_t *s = expanded_keys + 32U;
   secret_expand(s_prefix, private_key);
   point_mul_g_compress(public_key, s);
 }
@@ -1843,13 +1762,13 @@ Hacl_Ed25519_sign_expanded(
 )
 {
   uint8_t *rs = signature;
-  uint8_t *ss = signature + (uint32_t)32U;
+  uint8_t *ss = signature + 32U;
   uint64_t rq[5U] = { 0U };
   uint64_t hq[5U] = { 0U };
   uint8_t rb[32U] = { 0U };
   uint8_t *public_key = expanded_keys;
-  uint8_t *s = expanded_keys + (uint32_t)32U;
-  uint8_t *prefix = expanded_keys + (uint32_t)64U;
+  uint8_t *s = expanded_keys + 32U;
+  uint8_t *prefix = expanded_keys + 64U;
   sha512_modq_pre(rq, prefix, msg_len, msg);
   store_56(rb, rq);
   point_mul_g_compress(rs, rb);
@@ -1904,7 +1823,7 @@ Hacl_Ed25519_verify(uint8_t *public_key, uint32_t msg_len, uint8_t *msg, uint8_t
     {
       uint8_t hb[32U] = { 0U };
       uint8_t *rs1 = signature;
-      uint8_t *sb = signature + (uint32_t)32U;
+      uint8_t *sb = signature + 32U;
       uint64_t tmp[5U] = { 0U };
       load_32_bytes(tmp, sb);
       bool b1 = gte_q(tmp);
diff --git a/src/Hacl_FFDHE.c b/src/Hacl_FFDHE.c
index 9cf2ddfb..dbf245f6 100644
--- a/src/Hacl_FFDHE.c
+++ b/src/Hacl_FFDHE.c
@@ -35,23 +35,23 @@ static inline uint32_t ffdhe_len(Spec_FFDHE_ffdhe_alg a)
   {
     case Spec_FFDHE_FFDHE2048:
       {
-        return (uint32_t)256U;
+        return 256U;
       }
     case Spec_FFDHE_FFDHE3072:
       {
-        return (uint32_t)384U;
+        return 384U;
       }
     case Spec_FFDHE_FFDHE4096:
       {
-        return (uint32_t)512U;
+        return 512U;
       }
     case Spec_FFDHE_FFDHE6144:
       {
-        return (uint32_t)768U;
+        return 768U;
       }
     case Spec_FFDHE_FFDHE8192:
       {
-        return (uint32_t)1024U;
+        return 1024U;
       }
     default:
       {
@@ -63,7 +63,7 @@ static inline uint32_t ffdhe_len(Spec_FFDHE_ffdhe_alg a)
 
 static inline void ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a, uint64_t *p_r2_n)
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   uint64_t *p_n = p_r2_n;
   uint64_t *r2_n = p_r2_n + nLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), ffdhe_len(a));
@@ -104,88 +104,80 @@ static inline void ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a, uint64_t *p_r2_n)
       }
   }
   uint32_t len = ffdhe_len(a);
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint8_t *os = p_s;
     uint8_t x = p[i];
     os[i] = x;
   }
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ffdhe_len(a), p_s, p_n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((ffdhe_len(a) - (uint32_t)1U)
-    / (uint32_t)8U
-    + (uint32_t)1U,
-    (uint32_t)8U * ffdhe_len(a) - (uint32_t)1U,
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((ffdhe_len(a) - 1U) / 8U + 1U,
+    8U * ffdhe_len(a) - 1U,
     p_n,
     r2_n);
 }
 
 static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, uint64_t *p_n)
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t p_n1[nLen];
   memset(p_n1, 0U, nLen * sizeof (uint64_t));
-  uint64_t
-  c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, p_n[0U], (uint64_t)1U, p_n1);
-  if ((uint32_t)1U < nLen)
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, p_n[0U], 1ULL, p_n1);
+  if (1U < nLen)
   {
-    uint64_t *a1 = p_n + (uint32_t)1U;
-    uint64_t *res1 = p_n1 + (uint32_t)1U;
+    uint64_t *a1 = p_n + 1U;
+    uint64_t *res1 = p_n1 + 1U;
     uint64_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (nLen - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (nLen - 1U) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
     }
-    for
-    (uint32_t
-      i = (nLen - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < nLen - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (nLen - 1U) / 4U * 4U; i < nLen - 1U; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c1 = c;
-    KRML_HOST_IGNORE(c1);
+    KRML_MAYBE_UNUSED_VAR(c1);
   }
   else
   {
-    KRML_HOST_IGNORE(c0);
+    KRML_MAYBE_UNUSED_VAR(c0);
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t b2[nLen];
   memset(b2, 0U, nLen * sizeof (uint64_t));
-  uint32_t i0 = (uint32_t)0U;
-  uint32_t j = (uint32_t)0U;
-  b2[i0] = b2[i0] | (uint64_t)1U << j;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < nLen; i++)
+  uint32_t i0 = 0U;
+  uint32_t j = 0U;
+  b2[i0] = b2[i0] | 1ULL << j;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < nLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(b2[i], pk_n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(b2[i], pk_n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t res = acc0;
   uint64_t m0 = res;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < nLen; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < nLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(pk_n[i], p_n1[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(pk_n[i], p_n1[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   return m0 & m1;
@@ -200,21 +192,19 @@ ffdhe_compute_exp(
   uint8_t *res
 )
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   uint64_t *p_n = p_r2_n;
   uint64_t *r2_n = p_r2_n + nLen;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t res_n[nLen];
   memset(res_n, 0U, nLen * sizeof (uint64_t));
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(p_n[0U]);
-  Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((ffdhe_len(a) - (uint32_t)1U)
-    / (uint32_t)8U
-    + (uint32_t)1U,
+  Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((ffdhe_len(a) - 1U) / 8U + 1U,
     p_n,
     mu,
     r2_n,
     b_n,
-    (uint32_t)64U * nLen,
+    64U * nLen,
     sk_n,
     res_n);
   Hacl_Bignum_Convert_bn_to_bytes_be_uint64(ffdhe_len(a), res_n, res);
@@ -227,7 +217,7 @@ uint32_t Hacl_FFDHE_ffdhe_len(Spec_FFDHE_ffdhe_alg a)
 
 uint64_t *Hacl_FFDHE_new_ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a)
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen);
   uint64_t *res = (uint64_t *)KRML_HOST_CALLOC(nLen + nLen, sizeof (uint64_t));
   if (res == NULL)
@@ -249,17 +239,17 @@ Hacl_FFDHE_ffdhe_secret_to_public_precomp(
 )
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t g_n[nLen];
   memset(g_n, 0U, nLen * sizeof (uint64_t));
-  uint8_t g = (uint8_t)0U;
+  uint8_t g = 0U;
   {
     uint8_t *os = &g;
     uint8_t x = Hacl_Impl_FFDHE_Constants_ffdhe_g2[0U];
     os[0U] = x;
   }
-  Hacl_Bignum_Convert_bn_from_bytes_be_uint64((uint32_t)1U, &g, g_n);
+  Hacl_Bignum_Convert_bn_from_bytes_be_uint64(1U, &g, g_n);
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t sk_n[nLen];
   memset(sk_n, 0U, nLen * sizeof (uint64_t));
@@ -270,7 +260,7 @@ Hacl_FFDHE_ffdhe_secret_to_public_precomp(
 void Hacl_FFDHE_ffdhe_secret_to_public(Spec_FFDHE_ffdhe_alg a, uint8_t *sk, uint8_t *pk)
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen);
   uint64_t p_r2_n[nLen + nLen];
   memset(p_r2_n, 0U, (nLen + nLen) * sizeof (uint64_t));
@@ -288,7 +278,7 @@ Hacl_FFDHE_ffdhe_shared_secret_precomp(
 )
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   uint64_t *p_n = p_r2_n;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t sk_n[nLen];
@@ -299,7 +289,7 @@ Hacl_FFDHE_ffdhe_shared_secret_precomp(
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(len, sk, sk_n);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(len, pk, pk_n);
   uint64_t m = ffdhe_check_pk(a, pk_n, p_n);
-  if (m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  if (m == 0xFFFFFFFFFFFFFFFFULL)
   {
     ffdhe_compute_exp(a, p_r2_n, sk_n, pk_n, ss);
   }
@@ -310,7 +300,7 @@ uint64_t
 Hacl_FFDHE_ffdhe_shared_secret(Spec_FFDHE_ffdhe_alg a, uint8_t *sk, uint8_t *pk, uint8_t *ss)
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen);
   uint64_t p_n[nLen + nLen];
   memset(p_n, 0U, (nLen + nLen) * sizeof (uint64_t));
diff --git a/src/Hacl_Frodo1344.c b/src/Hacl_Frodo1344.c
index 0696f34c..a565a85b 100644
--- a/src/Hacl_Frodo1344.c
+++ b/src/Hacl_Frodo1344.c
@@ -29,151 +29,113 @@
 #include "internal/Hacl_Frodo_KEM.h"
 #include "lib_memzero0.h"
 
-uint32_t Hacl_Frodo1344_crypto_bytes = (uint32_t)32U;
+uint32_t Hacl_Frodo1344_crypto_bytes = 32U;
 
-uint32_t Hacl_Frodo1344_crypto_publickeybytes = (uint32_t)21520U;
+uint32_t Hacl_Frodo1344_crypto_publickeybytes = 21520U;
 
-uint32_t Hacl_Frodo1344_crypto_secretkeybytes = (uint32_t)43088U;
+uint32_t Hacl_Frodo1344_crypto_secretkeybytes = 43088U;
 
-uint32_t Hacl_Frodo1344_crypto_ciphertextbytes = (uint32_t)21632U;
+uint32_t Hacl_Frodo1344_crypto_ciphertextbytes = 21632U;
 
 uint32_t Hacl_Frodo1344_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[80U] = { 0U };
-  randombytes_((uint32_t)80U, coins);
+  randombytes_(80U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)32U;
-  uint8_t *z = coins + (uint32_t)64U;
+  uint8_t *seed_se = coins + 32U;
+  uint8_t *z = coins + 64U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake256_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)21552U;
+  Hacl_Hash_SHA3_shake256_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 21552U;
   uint16_t s_matrix[10752U] = { 0U };
   uint16_t e_matrix[10752U] = { 0U };
   uint8_t r[43008U] = { 0U };
   uint8_t shake_input_seed_se[33U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43008U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)1344U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)1344U,
-    (uint32_t)8U,
-    r + (uint32_t)21504U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(33U, shake_input_seed_se, 43008U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(1344U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(1344U, 8U, r + 21504U, e_matrix);
   uint16_t b_matrix[10752U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)1806336U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 1806336U);
   uint16_t a_matrix[1806336U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)1344U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)1344U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)1344U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)1344U,
-    (uint32_t)8U,
-    (uint32_t)16U,
-    b_matrix,
-    b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)1344U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)10752U, uint16_t);
-  uint32_t slen1 = (uint32_t)43056U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 1344U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(1344U, 1344U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(1344U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(1344U, 8U, 16U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(1344U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 10752U, uint16_t);
+  uint32_t slen1 = 43056U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)32U, pk, (uint32_t)21520U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)21520U, pk, (uint32_t)32U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)80U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 32U * sizeof (uint8_t));
+  memcpy(sk_p + 32U, pk, 21520U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(21520U, pk, 32U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 80U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo1344_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[32U] = { 0U };
-  randombytes_((uint32_t)32U, coins);
+  randombytes_(32U, coins);
   uint8_t seed_se_k[64U] = { 0U };
   uint8_t pkh_mu[64U] = { 0U };
-  Hacl_SHA3_shake256_hacl((uint32_t)21520U, pk, (uint32_t)32U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)32U, coins, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)64U, pkh_mu, (uint32_t)64U, seed_se_k);
+  Hacl_Hash_SHA3_shake256_hacl(21520U, pk, 32U, pkh_mu);
+  memcpy(pkh_mu + 32U, coins, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(64U, pkh_mu, 64U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)32U;
+  uint8_t *k = seed_se_k + 32U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[10752U] = { 0U };
   uint16_t ep_matrix[10752U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[43136U] = { 0U };
   uint8_t shake_input_seed_se[33U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43136U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, (uint32_t)1344U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)1344U,
-    r + (uint32_t)21504U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)43008U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(33U, shake_input_seed_se, 43136U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r + 21504U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 8U, r + 43008U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)21504U;
+  uint8_t *c2 = ct + 21504U;
   uint16_t bp_matrix[10752U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)1806336U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 1806336U);
   uint16_t a_matrix[1806336U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)1344U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)1344U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)1344U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)1344U, (uint32_t)16U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 1344U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 1344U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 1344U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 1344U, 16U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[10752U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)1344U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(1344U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)4U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)21664U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 4U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 16U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 21664U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t shake_input_ss[ss_init_len];
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)21632U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)21632U, k, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, (uint32_t)32U, ss);
+  memcpy(shake_input_ss, ct, 21632U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 21632U, k, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(ss_init_len, shake_input_ss, 32U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)64U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)32U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t);
+  Lib_Memzero0_memzero(coins, 32U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -181,39 +143,30 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[10752U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)21504U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)1344U, (uint32_t)16U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, c2, c_matrix);
+  uint8_t *c2 = ct + 21504U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 1344U, 16U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 16U, c2, c_matrix);
   uint8_t mu_decode[32U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)21552U;
+  uint8_t *s_bytes = sk + 21552U;
   uint16_t s_matrix[10752U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)1344U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)16U,
-    (uint32_t)4U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(1344U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 1344U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(16U, 4U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[64U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)64U;
+  uint32_t pkh_mu_decode_len = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t pkh_mu_decode[pkh_mu_decode_len];
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)43056U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)32U, mu_decode, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)64U, seed_se_k);
+  uint8_t *pkh = sk + 43056U;
+  memcpy(pkh_mu_decode, pkh, 32U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 32U, mu_decode, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, 64U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)32U;
+  uint8_t *kp = seed_se_k + 32U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[10752U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -222,80 +175,58 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[43136U] = { 0U };
   uint8_t shake_input_seed_se[33U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43136U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, (uint32_t)1344U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)1344U,
-    r + (uint32_t)21504U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)43008U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)32U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(33U, shake_input_seed_se, 43136U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r + 21504U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 8U, r + 43008U, epp_matrix);
+  uint8_t *pk = sk + 32U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)1806336U);
+  uint8_t *b = pk + 16U;
+  KRML_CHECK_SIZE(sizeof (uint16_t), 1806336U);
   uint16_t a_matrix[1806336U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)1344U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)1344U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)1344U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 1344U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 1344U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 1344U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[10752U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)1344U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(1344U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)4U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)1344U, (uint32_t)16U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)1344U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 4U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 1344U, 16U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 16U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 1344U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[32U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;
   }
-  uint32_t ss_init_len = (uint32_t)21664U;
+  uint32_t ss_init_len = 21664U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t ss_init[ss_init_len];
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)21632U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)21632U, kp_s, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, (uint32_t)32U, ss);
+  memcpy(ss_init, ct, 21632U * sizeof (uint8_t));
+  memcpy(ss_init + 21632U, kp_s, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(ss_init_len, ss_init, 32U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)64U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)32U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 32U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 32U, uint8_t);
+  return 0U;
 }
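
The kem_dec hunks above rewrite the key-select step with explicit widening casts: the byte copied into kp_s is kp[i] when the comparison mask is all-ones and s[i] otherwise, with no data-dependent branch. A minimal standalone sketch of that idiom (not part of the patch), assuming -- as the generated code does -- that the mask is either 0 or 0xFFFF as produced by Hacl_Impl_Matrix_matrix_eq:

#include <stdint.h>
#include <stdio.h>

/* Branch-free select: returns b when mask == 0xFFFF, a when mask == 0. */
static uint8_t ct_select_u8(uint8_t a, uint8_t b, uint16_t mask)
{
  /* (uint8_t)mask is 0xFF or 0x00; the XOR-AND-XOR pattern picks b or a respectively. */
  return (uint8_t)((uint32_t)a ^ ((uint32_t)(uint8_t)mask & ((uint32_t)b ^ (uint32_t)a)));
}

int main(void)
{
  /* prints "1 2": mask 0 keeps the first operand, mask 0xFFFF selects the second */
  printf("%u %u\n", (unsigned)ct_select_u8(1U, 2U, 0U), (unsigned)ct_select_u8(1U, 2U, 0xFFFFU));
  return 0;
}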
 
diff --git a/src/Hacl_Frodo64.c b/src/Hacl_Frodo64.c
index 575390e3..91434038 100644
--- a/src/Hacl_Frodo64.c
+++ b/src/Hacl_Frodo64.c
@@ -34,145 +34,111 @@
  */
 
 
-uint32_t Hacl_Frodo64_crypto_bytes = (uint32_t)16U;
+uint32_t Hacl_Frodo64_crypto_bytes = 16U;
 
-uint32_t Hacl_Frodo64_crypto_publickeybytes = (uint32_t)976U;
+uint32_t Hacl_Frodo64_crypto_publickeybytes = 976U;
 
-uint32_t Hacl_Frodo64_crypto_secretkeybytes = (uint32_t)2032U;
+uint32_t Hacl_Frodo64_crypto_secretkeybytes = 2032U;
 
-uint32_t Hacl_Frodo64_crypto_ciphertextbytes = (uint32_t)1080U;
+uint32_t Hacl_Frodo64_crypto_ciphertextbytes = 1080U;
 
 uint32_t Hacl_Frodo64_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[48U] = { 0U };
-  randombytes_((uint32_t)48U, coins);
+  randombytes_(48U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)16U;
-  uint8_t *z = coins + (uint32_t)32U;
+  uint8_t *seed_se = coins + 16U;
+  uint8_t *z = coins + 32U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake128_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)992U;
+  Hacl_Hash_SHA3_shake128_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 992U;
   uint16_t s_matrix[512U] = { 0U };
   uint16_t e_matrix[512U] = { 0U };
   uint8_t r[2048U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2048U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)64U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)64U,
-    (uint32_t)8U,
-    r + (uint32_t)1024U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 2048U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(64U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(64U, 8U, r + 1024U, e_matrix);
   uint16_t b_matrix[512U] = { 0U };
   uint16_t a_matrix[4096U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)64U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)64U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)64U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b_matrix, b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)64U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)512U, uint16_t);
-  uint32_t slen1 = (uint32_t)2016U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 64U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(64U, 64U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(64U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(64U, 8U, 15U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(64U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 512U, uint16_t);
+  uint32_t slen1 = 2016U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)16U, pk, (uint32_t)976U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)976U, pk, (uint32_t)16U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)48U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 16U * sizeof (uint8_t));
+  memcpy(sk_p + 16U, pk, 976U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(976U, pk, 16U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 48U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo64_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[16U] = { 0U };
-  randombytes_((uint32_t)16U, coins);
+  randombytes_(16U, coins);
   uint8_t seed_se_k[32U] = { 0U };
   uint8_t pkh_mu[32U] = { 0U };
-  Hacl_SHA3_shake128_hacl((uint32_t)976U, pk, (uint32_t)16U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)16U, coins, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)32U, pkh_mu, (uint32_t)32U, seed_se_k);
+  Hacl_Hash_SHA3_shake128_hacl(976U, pk, 16U, pkh_mu);
+  memcpy(pkh_mu + 16U, coins, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(32U, pkh_mu, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)16U;
+  uint8_t *k = seed_se_k + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[512U] = { 0U };
   uint16_t ep_matrix[512U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[2176U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2176U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, (uint32_t)64U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)64U,
-    r + (uint32_t)1024U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)2048U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 2176U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r + 1024U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 8U, r + 2048U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)960U;
+  uint8_t *c2 = ct + 960U;
   uint16_t bp_matrix[512U] = { 0U };
   uint16_t a_matrix[4096U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)64U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)64U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)64U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 64U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 64U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 64U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 64U, 15U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[512U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(64U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)1096U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 15U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 1096U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t shake_input_ss[ss_init_len];
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)1080U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)1080U, k, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, (uint32_t)16U, ss);
+  memcpy(shake_input_ss, ct, 1080U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 1080U, k, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(ss_init_len, shake_input_ss, 16U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(coins, 16U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -180,39 +146,30 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[512U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)960U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, c2, c_matrix);
+  uint8_t *c2 = ct + 960U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 64U, 15U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 15U, c2, c_matrix);
   uint8_t mu_decode[16U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)992U;
+  uint8_t *s_bytes = sk + 992U;
   uint16_t s_matrix[512U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)64U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(64U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 64U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(15U, 2U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[32U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)32U;
+  uint32_t pkh_mu_decode_len = 32U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t pkh_mu_decode[pkh_mu_decode_len];
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)2016U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)16U, mu_decode, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)32U, seed_se_k);
+  uint8_t *pkh = sk + 2016U;
+  memcpy(pkh_mu_decode, pkh, 16U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 16U, mu_decode, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)16U;
+  uint8_t *kp = seed_se_k + 16U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[512U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -221,80 +178,58 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[2176U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2176U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, (uint32_t)64U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)64U,
-    r + (uint32_t)1024U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)2048U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)16U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 2176U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r + 1024U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 8U, r + 2048U, epp_matrix);
+  uint8_t *pk = sk + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t a_matrix[4096U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)64U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)64U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)64U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 64U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 64U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 64U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[512U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(64U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)64U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 64U, 15U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 15U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 64U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;);
-  uint32_t ss_init_len = (uint32_t)1096U;
+  uint32_t ss_init_len = 1096U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t ss_init[ss_init_len];
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)1080U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)1080U, kp_s, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, (uint32_t)16U, ss);
+  memcpy(ss_init, ct, 1080U * sizeof (uint8_t));
+  memcpy(ss_init + 1080U, kp_s, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(ss_init_len, ss_init, 16U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)16U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 16U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 16U, uint8_t);
+  return 0U;
 }
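
Throughout these files the SHAKE helpers move from Hacl_SHA3_* to Hacl_Hash_SHA3_*, while the argument order visible at every call site stays (input length, input, output length, output). A minimal caller sketch against the new names; the header choice is taken from the includes shown in these sources and is an assumption, adjust it if the declaration lives elsewhere:

#include <stdint.h>
#include "internal/Hacl_Frodo_KEM.h" /* assumed to provide the renamed SHAKE declarations used by the Frodo code */

/* Derive a 16-byte public seed from 16 random bytes, mirroring the keypair functions above. */
static void derive_seed_a(uint8_t z[16], uint8_t seed_a[16])
{
  Hacl_Hash_SHA3_shake128_hacl(16U, z, 16U, seed_a);
}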
 
diff --git a/src/Hacl_Frodo640.c b/src/Hacl_Frodo640.c
index 54af36d8..8baaee46 100644
--- a/src/Hacl_Frodo640.c
+++ b/src/Hacl_Frodo640.c
@@ -29,151 +29,113 @@
 #include "internal/Hacl_Frodo_KEM.h"
 #include "lib_memzero0.h"
 
-uint32_t Hacl_Frodo640_crypto_bytes = (uint32_t)16U;
+uint32_t Hacl_Frodo640_crypto_bytes = 16U;
 
-uint32_t Hacl_Frodo640_crypto_publickeybytes = (uint32_t)9616U;
+uint32_t Hacl_Frodo640_crypto_publickeybytes = 9616U;
 
-uint32_t Hacl_Frodo640_crypto_secretkeybytes = (uint32_t)19888U;
+uint32_t Hacl_Frodo640_crypto_secretkeybytes = 19888U;
 
-uint32_t Hacl_Frodo640_crypto_ciphertextbytes = (uint32_t)9720U;
+uint32_t Hacl_Frodo640_crypto_ciphertextbytes = 9720U;
 
 uint32_t Hacl_Frodo640_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[48U] = { 0U };
-  randombytes_((uint32_t)48U, coins);
+  randombytes_(48U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)16U;
-  uint8_t *z = coins + (uint32_t)32U;
+  uint8_t *seed_se = coins + 16U;
+  uint8_t *z = coins + 32U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake128_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)9632U;
+  Hacl_Hash_SHA3_shake128_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 9632U;
   uint16_t s_matrix[5120U] = { 0U };
   uint16_t e_matrix[5120U] = { 0U };
   uint8_t r[20480U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20480U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)640U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)640U,
-    (uint32_t)8U,
-    r + (uint32_t)10240U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 20480U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(640U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(640U, 8U, r + 10240U, e_matrix);
   uint16_t b_matrix[5120U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)409600U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 409600U);
   uint16_t a_matrix[409600U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)640U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)640U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)640U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)640U,
-    (uint32_t)8U,
-    (uint32_t)15U,
-    b_matrix,
-    b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)640U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)5120U, uint16_t);
-  uint32_t slen1 = (uint32_t)19872U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 640U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(640U, 640U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(640U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(640U, 8U, 15U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(640U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 5120U, uint16_t);
+  uint32_t slen1 = 19872U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)16U, pk, (uint32_t)9616U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)9616U, pk, (uint32_t)16U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)48U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 16U * sizeof (uint8_t));
+  memcpy(sk_p + 16U, pk, 9616U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(9616U, pk, 16U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 48U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo640_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[16U] = { 0U };
-  randombytes_((uint32_t)16U, coins);
+  randombytes_(16U, coins);
   uint8_t seed_se_k[32U] = { 0U };
   uint8_t pkh_mu[32U] = { 0U };
-  Hacl_SHA3_shake128_hacl((uint32_t)9616U, pk, (uint32_t)16U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)16U, coins, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)32U, pkh_mu, (uint32_t)32U, seed_se_k);
+  Hacl_Hash_SHA3_shake128_hacl(9616U, pk, 16U, pkh_mu);
+  memcpy(pkh_mu + 16U, coins, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(32U, pkh_mu, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)16U;
+  uint8_t *k = seed_se_k + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[5120U] = { 0U };
   uint16_t ep_matrix[5120U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[20608U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20608U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, (uint32_t)640U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)640U,
-    r + (uint32_t)10240U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)20480U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 20608U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r + 10240U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 8U, r + 20480U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)9600U;
+  uint8_t *c2 = ct + 9600U;
   uint16_t bp_matrix[5120U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)409600U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 409600U);
   uint16_t a_matrix[409600U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)640U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)640U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)640U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 640U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 640U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 640U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 640U, 15U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[5120U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)640U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(640U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)9736U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 15U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 9736U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t shake_input_ss[ss_init_len];
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)9720U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)9720U, k, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, (uint32_t)16U, ss);
+  memcpy(shake_input_ss, ct, 9720U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 9720U, k, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(ss_init_len, shake_input_ss, 16U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(coins, 16U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -181,39 +143,30 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[5120U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)9600U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, c2, c_matrix);
+  uint8_t *c2 = ct + 9600U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 640U, 15U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 15U, c2, c_matrix);
   uint8_t mu_decode[16U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)9632U;
+  uint8_t *s_bytes = sk + 9632U;
   uint16_t s_matrix[5120U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)640U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(640U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 640U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(15U, 2U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[32U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)32U;
+  uint32_t pkh_mu_decode_len = 32U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t pkh_mu_decode[pkh_mu_decode_len];
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)19872U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)16U, mu_decode, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)32U, seed_se_k);
+  uint8_t *pkh = sk + 19872U;
+  memcpy(pkh_mu_decode, pkh, 16U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 16U, mu_decode, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)16U;
+  uint8_t *kp = seed_se_k + 16U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[5120U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -222,81 +175,59 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[20608U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20608U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, (uint32_t)640U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)640U,
-    r + (uint32_t)10240U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)20480U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)16U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 20608U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r + 10240U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 8U, r + 20480U, epp_matrix);
+  uint8_t *pk = sk + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)409600U);
+  uint8_t *b = pk + 16U;
+  KRML_CHECK_SIZE(sizeof (uint16_t), 409600U);
   uint16_t a_matrix[409600U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)640U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)640U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)640U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 640U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 640U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 640U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[5120U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)640U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(640U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)640U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 640U, 15U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 15U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 640U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;);
-  uint32_t ss_init_len = (uint32_t)9736U;
+  uint32_t ss_init_len = 9736U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t ss_init[ss_init_len];
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)9720U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)9720U, kp_s, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, (uint32_t)16U, ss);
+  memcpy(ss_init, ct, 9720U * sizeof (uint8_t));
+  memcpy(ss_init + 9720U, kp_s, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(ss_init_len, ss_init, 16U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)16U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 16U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 16U, uint8_t);
+  return 0U;
 }
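
The wiping calls keep the same shape after the literal cleanup: Lib_Memzero0_memzero takes a pointer, an element count and the element type, and every secret stack buffer is scrubbed before return. A small usage sketch of that convention (the buffer and its use are placeholders, not code from this patch):

#include <stdint.h>
#include "lib_memzero0.h" /* same header the Frodo sources include above */

static void use_and_wipe_secret(void)
{
  uint8_t secret[32U] = { 0U };
  /* ... fill and use the secret here ... */
  Lib_Memzero0_memzero(secret, 32U, uint8_t); /* (pointer, element count, element type) */
}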
 
diff --git a/src/Hacl_Frodo976.c b/src/Hacl_Frodo976.c
index 2e6aa6f0..76d78a30 100644
--- a/src/Hacl_Frodo976.c
+++ b/src/Hacl_Frodo976.c
@@ -29,151 +29,113 @@
 #include "internal/Hacl_Frodo_KEM.h"
 #include "lib_memzero0.h"
 
-uint32_t Hacl_Frodo976_crypto_bytes = (uint32_t)24U;
+uint32_t Hacl_Frodo976_crypto_bytes = 24U;
 
-uint32_t Hacl_Frodo976_crypto_publickeybytes = (uint32_t)15632U;
+uint32_t Hacl_Frodo976_crypto_publickeybytes = 15632U;
 
-uint32_t Hacl_Frodo976_crypto_secretkeybytes = (uint32_t)31296U;
+uint32_t Hacl_Frodo976_crypto_secretkeybytes = 31296U;
 
-uint32_t Hacl_Frodo976_crypto_ciphertextbytes = (uint32_t)15744U;
+uint32_t Hacl_Frodo976_crypto_ciphertextbytes = 15744U;
 
 uint32_t Hacl_Frodo976_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[64U] = { 0U };
-  randombytes_((uint32_t)64U, coins);
+  randombytes_(64U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)24U;
-  uint8_t *z = coins + (uint32_t)48U;
+  uint8_t *seed_se = coins + 24U;
+  uint8_t *z = coins + 48U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake256_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)15656U;
+  Hacl_Hash_SHA3_shake256_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 15656U;
   uint16_t s_matrix[7808U] = { 0U };
   uint16_t e_matrix[7808U] = { 0U };
   uint8_t r[31232U] = { 0U };
   uint8_t shake_input_seed_se[25U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31232U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)976U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)976U,
-    (uint32_t)8U,
-    r + (uint32_t)15616U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(25U, shake_input_seed_se, 31232U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(976U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(976U, 8U, r + 15616U, e_matrix);
   uint16_t b_matrix[7808U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)952576U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 952576U);
   uint16_t a_matrix[952576U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)976U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)976U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)976U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)976U,
-    (uint32_t)8U,
-    (uint32_t)16U,
-    b_matrix,
-    b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)976U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)7808U, uint16_t);
-  uint32_t slen1 = (uint32_t)31272U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 976U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(976U, 976U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(976U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(976U, 8U, 16U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(976U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 7808U, uint16_t);
+  uint32_t slen1 = 31272U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)24U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)24U, pk, (uint32_t)15632U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)15632U, pk, (uint32_t)24U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)64U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 24U * sizeof (uint8_t));
+  memcpy(sk_p + 24U, pk, 15632U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(15632U, pk, 24U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 64U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo976_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[24U] = { 0U };
-  randombytes_((uint32_t)24U, coins);
+  randombytes_(24U, coins);
   uint8_t seed_se_k[48U] = { 0U };
   uint8_t pkh_mu[48U] = { 0U };
-  Hacl_SHA3_shake256_hacl((uint32_t)15632U, pk, (uint32_t)24U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)24U, coins, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)48U, pkh_mu, (uint32_t)48U, seed_se_k);
+  Hacl_Hash_SHA3_shake256_hacl(15632U, pk, 24U, pkh_mu);
+  memcpy(pkh_mu + 24U, coins, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(48U, pkh_mu, 48U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)24U;
+  uint8_t *k = seed_se_k + 24U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[7808U] = { 0U };
   uint16_t ep_matrix[7808U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[31360U] = { 0U };
   uint8_t shake_input_seed_se[25U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31360U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, (uint32_t)976U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)976U,
-    r + (uint32_t)15616U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)31232U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(25U, shake_input_seed_se, 31360U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r + 15616U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 8U, r + 31232U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)15616U;
+  uint8_t *c2 = ct + 15616U;
   uint16_t bp_matrix[7808U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)952576U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 952576U);
   uint16_t a_matrix[952576U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)976U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)976U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)976U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 976U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 976U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 976U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 976U, 16U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[7808U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)976U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(976U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)3U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)15768U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 3U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 16U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 15768U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t shake_input_ss[ss_init_len];
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)15744U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)15744U, k, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, (uint32_t)24U, ss);
+  memcpy(shake_input_ss, ct, 15744U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 15744U, k, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(ss_init_len, shake_input_ss, 24U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)48U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)24U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t);
+  Lib_Memzero0_memzero(coins, 24U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -181,39 +143,30 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[7808U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)15616U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, c2, c_matrix);
+  uint8_t *c2 = ct + 15616U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 976U, 16U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 16U, c2, c_matrix);
   uint8_t mu_decode[24U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)15656U;
+  uint8_t *s_bytes = sk + 15656U;
   uint16_t s_matrix[7808U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)976U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)16U,
-    (uint32_t)3U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(976U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 976U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(16U, 3U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[48U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)48U;
+  uint32_t pkh_mu_decode_len = 48U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t pkh_mu_decode[pkh_mu_decode_len];
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)31272U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)24U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)24U, mu_decode, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)48U, seed_se_k);
+  uint8_t *pkh = sk + 31272U;
+  memcpy(pkh_mu_decode, pkh, 24U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 24U, mu_decode, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, 48U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)24U;
+  uint8_t *kp = seed_se_k + 24U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[7808U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -222,80 +175,58 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[31360U] = { 0U };
   uint8_t shake_input_seed_se[25U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31360U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, (uint32_t)976U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)976U,
-    r + (uint32_t)15616U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)31232U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)24U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(25U, shake_input_seed_se, 31360U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r + 15616U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 8U, r + 31232U, epp_matrix);
+  uint8_t *pk = sk + 24U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)952576U);
+  uint8_t *b = pk + 16U;
+  KRML_CHECK_SIZE(sizeof (uint16_t), 952576U);
   uint16_t a_matrix[952576U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)976U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)976U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)976U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 976U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 976U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 976U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[7808U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)976U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(976U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)3U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)976U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 3U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 976U, 16U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 16U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 976U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[24U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)24U; i++)
+  for (uint32_t i = 0U; i < 24U; i++)
   {
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;
   }
-  uint32_t ss_init_len = (uint32_t)15768U;
+  uint32_t ss_init_len = 15768U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t ss_init[ss_init_len];
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)15744U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)15744U, kp_s, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, (uint32_t)24U, ss);
+  memcpy(ss_init, ct, 15744U * sizeof (uint8_t));
+  memcpy(ss_init + 15744U, kp_s, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(ss_init_len, ss_init, 24U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)24U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)48U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)24U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 24U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 24U, uint8_t);
+  return 0U;
 }
 
diff --git a/src/Hacl_Frodo_KEM.c b/src/Hacl_Frodo_KEM.c
index 4265ac0e..e0a65a47 100644
--- a/src/Hacl_Frodo_KEM.c
+++ b/src/Hacl_Frodo_KEM.c
@@ -30,6 +30,6 @@
 
 void randombytes_(uint32_t len, uint8_t *res)
 {
-  KRML_HOST_IGNORE(Lib_RandomBuffer_System_randombytes(res, len));
+  Lib_RandomBuffer_System_randombytes(res, len);
 }
 
diff --git a/src/Hacl_GenericField32.c b/src/Hacl_GenericField32.c
index 8bd08a00..f509e6d4 100644
--- a/src/Hacl_GenericField32.c
+++ b/src/Hacl_GenericField32.c
@@ -56,7 +56,7 @@ Check whether this library will work for a modulus `n`.
 bool Hacl_GenericField32_field_modulus_check(uint32_t len, uint32_t *n)
 {
   uint32_t m = Hacl_Bignum_Montgomery_bn_check_modulus_u32(len, n);
-  return m == (uint32_t)0xFFFFFFFFU;
+  return m == 0xFFFFFFFFU;
 }
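For orientation while reviewing this hunk, a minimal sketch of how the checked entry point is typically called, assuming only the `bool Hacl_GenericField32_field_modulus_check(uint32_t len, uint32_t *n)` prototype shown above and the matching public header; the one-limb modulus value is hypothetical and chosen only to be odd.

/* Illustrative sketch, not part of the patch: calls the modulus check
   with a hypothetical single-limb (32-bit) odd modulus.  A true result
   means the rest of Hacl_GenericField32 may be used with this n. */
#include <stdbool.h>
#include <stdint.h>
#include "Hacl_GenericField32.h"  /* assumed public header for this .c file */

static bool modulus_check_example(void)
{
  uint32_t n[1U] = { 0xFFFFFFFBU };  /* hypothetical odd modulus, len = 1 limb */
  return Hacl_GenericField32_field_modulus_check(1U, n);
}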
 
 /**
@@ -82,7 +82,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -283,27 +283,27 @@ Hacl_GenericField32_exp_consttime(
   uint32_t aMc[k1.len];
   memset(aMc, 0U, k1.len * sizeof (uint32_t));
   memcpy(aMc, aM, k1.len * sizeof (uint32_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t ctx[len1 + len1];
     memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint32_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len1; i++)
+      for (uint32_t i = 0U; i < len1; i++)
       {
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aMc[i]);
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aMc[i]);
         resM[i] = resM[i] ^ dummy;
         aMc[i] = aMc[i] ^ dummy;
       }
@@ -314,9 +314,9 @@ Hacl_GenericField32_exp_consttime(
       sw = bit;
     }
     uint32_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len1; i++)
+    for (uint32_t i = 0U; i < len1; i++)
     {
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aMc[i]);
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aMc[i]);
       resM[i] = resM[i] ^ dummy;
       aMc[i] = aMc[i] ^ dummy;
     }
@@ -324,22 +324,22 @@ Hacl_GenericField32_exp_consttime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 32U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t ctx[len1 + len1];
     memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint32_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len1);
-    uint32_t table[(uint32_t)16U * len1];
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint32_t));
+    KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len1);
+    uint32_t table[16U * len1];
+    memset(table, 0U, 16U * len1 * sizeof (uint32_t));
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t tmp[len1];
     memset(tmp, 0U, len1 * sizeof (uint32_t));
@@ -350,29 +350,29 @@ Hacl_GenericField32_exp_consttime(
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint32_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint32_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint32_t *t11 = table + (i + 1U) * len1;
       uint32_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint32_t));
-      uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint32_t));
+      uint32_t *t2 = table + (2U * i + 2U) * len1;
       uint32_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint32_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint32_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-      memcpy(resM, (uint32_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint32_t));
+      uint32_t i0 = bBits / 4U * 4U;
+      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+      memcpy(resM, (uint32_t *)(table + 0U * len1), len1 * sizeof (uint32_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-        const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+        const uint32_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint32_t *os = resM;
           uint32_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -388,24 +388,24 @@ Hacl_GenericField32_exp_consttime(
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t tmp0[len1];
     memset(tmp0, 0U, len1 * sizeof (uint32_t));
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
     {
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint32_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, (uint32_t)4U);
-      memcpy(tmp0, (uint32_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint32_t));
+      uint32_t k2 = bBits - bBits % 4U - 4U * i0 - 4U;
+      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, 4U);
+      memcpy(tmp0, (uint32_t *)(table + 0U * len1), len1 * sizeof (uint32_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-        const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+        const uint32_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint32_t *os = tmp0;
           uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -450,7 +450,7 @@ Hacl_GenericField32_exp_vartime(
   uint32_t aMc[k1.len];
   memset(aMc, 0U, k1.len * sizeof (uint32_t));
   memcpy(aMc, aM, k1.len * sizeof (uint32_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t ctx[len1 + len1];
@@ -460,13 +460,13 @@ Hacl_GenericField32_exp_vartime(
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n0, k1.mu, resM, aMc, resM);
@@ -478,22 +478,22 @@ Hacl_GenericField32_exp_vartime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 32U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t ctx[len1 + len1];
     memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint32_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len1);
-    uint32_t table[(uint32_t)16U * len1];
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint32_t));
+    KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len1);
+    uint32_t table[16U * len1];
+    memset(table, 0U, 16U * len1 * sizeof (uint32_t));
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t tmp[len1];
     memset(tmp, 0U, len1 * sizeof (uint32_t));
@@ -504,21 +504,21 @@ Hacl_GenericField32_exp_vartime(
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint32_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint32_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint32_t *t11 = table + (i + 1U) * len1;
       uint32_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint32_t));
-      uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint32_t));
+      uint32_t *t2 = table + (2U * i + 2U) * len1;
       uint32_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint32_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint32_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+      uint32_t i = bBits / 4U * 4U;
+      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
       uint32_t bits_l32 = bits_c;
       const uint32_t *a_bits_l = table + bits_l32 * len1;
       memcpy(resM, (uint32_t *)a_bits_l, len1 * sizeof (uint32_t));
@@ -532,16 +532,16 @@ Hacl_GenericField32_exp_vartime(
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t tmp0[len1];
     memset(tmp0, 0U, len1 * sizeof (uint32_t));
-    for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < bBits / 4U; i++)
     {
       KRML_MAYBE_FOR4(i0,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint32_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, (uint32_t)4U);
+      uint32_t k2 = bBits - bBits % 4U - 4U * i - 4U;
+      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, 4U);
       uint32_t bits_l32 = bits_l;
       const uint32_t *a_bits_l = table + bits_l32 * len1;
       memcpy(tmp0, (uint32_t *)a_bits_l, len1 * sizeof (uint32_t));
@@ -574,38 +574,33 @@ Hacl_GenericField32_inverse(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t n2[len1];
   memset(n2, 0U, len1 * sizeof (uint32_t));
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
   uint32_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint32_t *a1 = k1.n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t *a1 = k1.n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint32_t t1 = a1[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i);
     }
     uint32_t c10 = c;
     c1 = c10;
@@ -614,7 +609,7 @@ Hacl_GenericField32_inverse(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
-  Hacl_GenericField32_exp_vartime(k, aM, k1.len * (uint32_t)32U, n2, aInvM);
+  KRML_MAYBE_UNUSED_VAR(c1);
+  Hacl_GenericField32_exp_vartime(k, aM, k1.len * 32U, n2, aInvM);
 }
 
diff --git a/src/Hacl_GenericField64.c b/src/Hacl_GenericField64.c
index 7c11d3b7..3f291d36 100644
--- a/src/Hacl_GenericField64.c
+++ b/src/Hacl_GenericField64.c
@@ -55,7 +55,7 @@ Check whether this library will work for a modulus `n`.
 bool Hacl_GenericField64_field_modulus_check(uint32_t len, uint64_t *n)
 {
   uint64_t m = Hacl_Bignum_Montgomery_bn_check_modulus_u64(len, n);
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -81,7 +81,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint64_t));
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -282,27 +282,27 @@ Hacl_GenericField64_exp_consttime(
   uint64_t aMc[k1.len];
   memset(aMc, 0U, k1.len * sizeof (uint64_t));
   memcpy(aMc, aM, k1.len * sizeof (uint64_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t ctx[len1 + len1];
     memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint64_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len1; i++)
+      for (uint32_t i = 0U; i < len1; i++)
       {
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aMc[i]);
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aMc[i]);
         resM[i] = resM[i] ^ dummy;
         aMc[i] = aMc[i] ^ dummy;
       }
@@ -313,9 +313,9 @@ Hacl_GenericField64_exp_consttime(
       sw = bit;
     }
     uint64_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len1; i++)
+    for (uint32_t i = 0U; i < len1; i++)
     {
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aMc[i]);
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aMc[i]);
       resM[i] = resM[i] ^ dummy;
       aMc[i] = aMc[i] ^ dummy;
     }
@@ -323,22 +323,22 @@ Hacl_GenericField64_exp_consttime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 64U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t ctx[len1 + len1];
     memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint64_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len1);
-    uint64_t table[(uint32_t)16U * len1];
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint64_t));
+    KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len1);
+    uint64_t table[16U * len1];
+    memset(table, 0U, 16U * len1 * sizeof (uint64_t));
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t tmp[len1];
     memset(tmp, 0U, len1 * sizeof (uint64_t));
@@ -349,29 +349,29 @@ Hacl_GenericField64_exp_consttime(
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint64_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint64_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint64_t *t11 = table + (i + 1U) * len1;
       uint64_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint64_t));
-      uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint64_t));
+      uint64_t *t2 = table + (2U * i + 2U) * len1;
       uint64_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint64_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint64_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-      memcpy(resM, (uint64_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint64_t));
+      uint32_t i0 = bBits / 4U * 4U;
+      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+      memcpy(resM, (uint64_t *)(table + 0U * len1), len1 * sizeof (uint64_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-        const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+        const uint64_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint64_t *os = resM;
           uint64_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -387,24 +387,24 @@ Hacl_GenericField64_exp_consttime(
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t tmp0[len1];
     memset(tmp0, 0U, len1 * sizeof (uint64_t));
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
     {
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, (uint32_t)4U);
-      memcpy(tmp0, (uint64_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint64_t));
+      uint32_t k2 = bBits - bBits % 4U - 4U * i0 - 4U;
+      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, 4U);
+      memcpy(tmp0, (uint64_t *)(table + 0U * len1), len1 * sizeof (uint64_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-        const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+        const uint64_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint64_t *os = tmp0;
           uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -449,7 +449,7 @@ Hacl_GenericField64_exp_vartime(
   uint64_t aMc[k1.len];
   memset(aMc, 0U, k1.len * sizeof (uint64_t));
   memcpy(aMc, aM, k1.len * sizeof (uint64_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t ctx[len1 + len1];
@@ -459,13 +459,13 @@ Hacl_GenericField64_exp_vartime(
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n0, k1.mu, resM, aMc, resM);
@@ -477,22 +477,22 @@ Hacl_GenericField64_exp_vartime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 64U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t ctx[len1 + len1];
     memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint64_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len1);
-    uint64_t table[(uint32_t)16U * len1];
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint64_t));
+    KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len1);
+    uint64_t table[16U * len1];
+    memset(table, 0U, 16U * len1 * sizeof (uint64_t));
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t tmp[len1];
     memset(tmp, 0U, len1 * sizeof (uint64_t));
@@ -503,21 +503,21 @@ Hacl_GenericField64_exp_vartime(
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint64_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint64_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint64_t *t11 = table + (i + 1U) * len1;
       uint64_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint64_t));
-      uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint64_t));
+      uint64_t *t2 = table + (2U * i + 2U) * len1;
       uint64_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint64_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint64_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+      uint32_t i = bBits / 4U * 4U;
+      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
       uint32_t bits_l32 = (uint32_t)bits_c;
       const uint64_t *a_bits_l = table + bits_l32 * len1;
       memcpy(resM, (uint64_t *)a_bits_l, len1 * sizeof (uint64_t));
@@ -531,16 +531,16 @@ Hacl_GenericField64_exp_vartime(
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t tmp0[len1];
     memset(tmp0, 0U, len1 * sizeof (uint64_t));
-    for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < bBits / 4U; i++)
     {
       KRML_MAYBE_FOR4(i0,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, (uint32_t)4U);
+      uint32_t k2 = bBits - bBits % 4U - 4U * i - 4U;
+      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, 4U);
       uint32_t bits_l32 = (uint32_t)bits_l;
       const uint64_t *a_bits_l = table + bits_l32 * len1;
       memcpy(tmp0, (uint64_t *)a_bits_l, len1 * sizeof (uint64_t));
@@ -573,38 +573,33 @@ Hacl_GenericField64_inverse(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t n2[len1];
   memset(n2, 0U, len1 * sizeof (uint64_t));
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
   uint64_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint64_t *a1 = k1.n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t *a1 = k1.n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c10 = c;
     c1 = c10;
@@ -613,7 +608,7 @@ Hacl_GenericField64_inverse(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
-  Hacl_GenericField64_exp_vartime(k, aM, k1.len * (uint32_t)64U, n2, aInvM);
+  KRML_MAYBE_UNUSED_VAR(c1);
+  Hacl_GenericField64_exp_vartime(k, aM, k1.len * 64U, n2, aInvM);
 }
 
diff --git a/src/Hacl_HKDF.c b/src/Hacl_HKDF.c
index 6148337c..027b719f 100644
--- a/src/Hacl_HKDF.c
+++ b/src/Hacl_HKDF.c
@@ -45,39 +45,39 @@ Hacl_HKDF_expand_sha2_256(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -125,39 +125,39 @@ Hacl_HKDF_expand_sha2_384(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)48U;
+  uint32_t tlen = 48U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -205,39 +205,39 @@ Hacl_HKDF_expand_sha2_512(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -285,39 +285,39 @@ Hacl_HKDF_expand_blake2s_32(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -365,39 +365,39 @@ Hacl_HKDF_expand_blake2b_32(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/Hacl_HKDF_Blake2b_256.c b/src/Hacl_HKDF_Blake2b_256.c
index 0d28292a..fe89115d 100644
--- a/src/Hacl_HKDF_Blake2b_256.c
+++ b/src/Hacl_HKDF_Blake2b_256.c
@@ -45,47 +45,39 @@ Hacl_HKDF_Blake2b_256_expand_blake2b_256(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/Hacl_HKDF_Blake2s_128.c b/src/Hacl_HKDF_Blake2s_128.c
index ec1e727e..4c9e9450 100644
--- a/src/Hacl_HKDF_Blake2s_128.c
+++ b/src/Hacl_HKDF_Blake2s_128.c
@@ -45,47 +45,39 @@ Hacl_HKDF_Blake2s_128_expand_blake2s_128(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t text[tlen + infolen + (uint32_t)1U];
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t text[tlen + infolen + 1U];
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/Hacl_HMAC.c b/src/Hacl_HMAC.c
index 7b4b36f0..b03bc7ac 100644
--- a/src/Hacl_HMAC.c
+++ b/src/Hacl_HMAC.c
@@ -23,12 +23,13 @@
  */
 
 
-#include "Hacl_HMAC.h"
+#include "internal/Hacl_HMAC.h"
 
 #include "internal/Hacl_Krmllib.h"
 #include "internal/Hacl_Hash_SHA2.h"
 #include "internal/Hacl_Hash_SHA1.h"
-#include "internal/Hacl_Hash_Blake2.h"
+#include "internal/Hacl_Hash_Blake2s.h"
+#include "internal/Hacl_Hash_Blake2b.h"
 
 /**
 Write the HMAC-SHA-1 MAC of a message (`data`) by using a key (`key`) into `dst`.
@@ -37,7 +38,7 @@ The key can be any length and will be hashed if it is longer and padded if it is
 `dst` must point to 20 bytes of memory.
 */
 void
-Hacl_HMAC_legacy_compute_sha1(
+Hacl_HMAC_compute_sha1(
   uint8_t *dst,
   uint8_t *key,
   uint32_t key_len,
@@ -45,68 +46,63 @@ Hacl_HMAC_legacy_compute_sha1(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)20U;
+    ite = 20U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Hash_SHA1_legacy_hash(key, key_len, nkey);
+    Hacl_Hash_SHA1_hash_oneshot(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
-  uint32_t
-  s[5U] =
-    {
-      (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-      (uint32_t)0xc3d2e1f0U
-    };
+  uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)0U, ipad, (uint32_t)64U);
+    Hacl_Hash_SHA1_update_last(s, 0ULL, ipad, 64U);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -118,25 +114,21 @@ Hacl_HMAC_legacy_compute_sha1(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_Hash_SHA1_legacy_update_multi(s, ipad, (uint32_t)1U);
-    Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-    Hacl_Hash_SHA1_legacy_update_last(s,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-      rem,
-      rem_len);
+    Hacl_Hash_SHA1_update_multi(s, ipad, 1U);
+    Hacl_Hash_SHA1_update_multi(s, full_blocks, n_blocks);
+    Hacl_Hash_SHA1_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
   }
-  Hacl_Hash_Core_SHA1_legacy_finish(s, dst1);
+  Hacl_Hash_SHA1_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_Hash_Core_SHA1_legacy_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)20U / block_len;
-  uint32_t rem0 = (uint32_t)20U % block_len;
+  Hacl_Hash_SHA1_init(s);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 20U / block_len;
+  uint32_t rem0 = 20U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)20U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 20U - n_blocks_ * block_len });
   }
   else
   {
@@ -147,13 +139,10 @@ Hacl_HMAC_legacy_compute_sha1(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_Hash_SHA1_legacy_update_multi(s, opad, (uint32_t)1U);
-  Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-  Hacl_Hash_SHA1_legacy_update_last(s,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-    rem,
-    rem_len);
-  Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
+  Hacl_Hash_SHA1_update_multi(s, opad, 1U);
+  Hacl_Hash_SHA1_update_multi(s, full_blocks, n_blocks);
+  Hacl_Hash_SHA1_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
+  Hacl_Hash_SHA1_finish(s, dst);
 }
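Since the doc comment above spells out the calling contract (key of any length, `dst` pointing to 20 bytes), a minimal caller sketch of the renamed function may help when reviewing the rename; it assumes only the `(dst, key, key_len, data, data_len)` prototype visible in this hunk and the public `Hacl_HMAC.h` header, with made-up key and message bytes.

/* Illustrative sketch, not part of the patch: one call to the renamed
   Hacl_HMAC_compute_sha1.  The key and message are arbitrary example
   bytes; `tag` receives the 20-byte HMAC-SHA-1 MAC. */
#include <stdint.h>
#include "Hacl_HMAC.h"

static void hmac_sha1_example(uint8_t tag[20U])
{
  uint8_t key[16U] = { 0x0bU };      /* any key length is accepted */
  uint8_t msg[] = "sample message";
  Hacl_HMAC_compute_sha1(tag, key, 16U, msg, (uint32_t)(sizeof msg - 1U));
}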
 
 /**
@@ -171,74 +160,71 @@ Hacl_HMAC_compute_sha2_256(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Streaming_SHA2_hash_256(key, key_len, nkey);
+    Hacl_Hash_SHA2_hash_256(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
-    uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
+    uint32_t x = Hacl_Hash_SHA2_h256[i];
     os[i] = x;);
   uint32_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)0U + (uint64_t)(uint32_t)64U,
-      (uint32_t)64U,
-      ipad,
-      s);
+    Hacl_Hash_SHA2_sha256_update_last(0ULL + (uint64_t)64U, 64U, ipad, s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -250,27 +236,24 @@ Hacl_HMAC_compute_sha2_256(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, ipad, s);
-    Hacl_SHA2_Scalar32_sha256_update_nblocks(n_blocks * (uint32_t)64U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
-      + (uint64_t)full_blocks_len
-      + (uint64_t)rem_len,
+    Hacl_Hash_SHA2_sha256_update_nblocks(64U, ipad, s);
+    Hacl_Hash_SHA2_sha256_update_nblocks(n_blocks * 64U, full_blocks, s);
+    Hacl_Hash_SHA2_sha256_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len,
       rem_len,
       rem,
       s);
   }
-  Hacl_SHA2_Scalar32_sha256_finish(s, dst1);
+  Hacl_Hash_SHA2_sha256_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_SHA2_Scalar32_sha256_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Hash_SHA2_sha256_init(s);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -281,15 +264,13 @@ Hacl_HMAC_compute_sha2_256(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, opad, s);
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(n_blocks * (uint32_t)64U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
-    + (uint64_t)full_blocks_len
-    + (uint64_t)rem_len,
+  Hacl_Hash_SHA2_sha256_update_nblocks(64U, opad, s);
+  Hacl_Hash_SHA2_sha256_update_nblocks(n_blocks * 64U, full_blocks, s);
+  Hacl_Hash_SHA2_sha256_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len,
     rem_len,
     rem,
     s);
-  Hacl_SHA2_Scalar32_sha256_finish(s, dst);
+  Hacl_Hash_SHA2_sha256_finish(s, dst);
 }
 
 /**
@@ -307,75 +288,75 @@ Hacl_HMAC_compute_sha2_384(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)48U;
+    ite = 48U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Streaming_SHA2_hash_384(key, key_len, nkey);
+    Hacl_Hash_SHA2_hash_384(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
-    uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
+    uint64_t x = Hacl_Hash_SHA2_h384[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -387,27 +368,26 @@ Hacl_HMAC_compute_sha2_384(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_Hash_SHA2_sha384_update_nblocks(128U, ipad, s);
+    Hacl_Hash_SHA2_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
       rem,
       s);
   }
-  Hacl_SHA2_Scalar32_sha384_finish(s, dst1);
+  Hacl_Hash_SHA2_sha384_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_SHA2_Scalar32_sha384_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)48U / block_len;
-  uint32_t rem0 = (uint32_t)48U % block_len;
+  Hacl_Hash_SHA2_sha384_init(s);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 48U / block_len;
+  uint32_t rem0 = 48U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)48U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 48U - n_blocks_ * block_len });
   }
   else
   {
@@ -418,15 +398,15 @@ Hacl_HMAC_compute_sha2_384(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_Hash_SHA2_sha384_update_nblocks(128U, opad, s);
+  Hacl_Hash_SHA2_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
     rem,
     s);
-  Hacl_SHA2_Scalar32_sha384_finish(s, dst);
+  Hacl_Hash_SHA2_sha384_finish(s, dst);
 }
 
 /**
@@ -444,75 +424,75 @@ Hacl_HMAC_compute_sha2_512(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Streaming_SHA2_hash_512(key, key_len, nkey);
+    Hacl_Hash_SHA2_hash_512(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
-    uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
+    uint64_t x = Hacl_Hash_SHA2_h512[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -524,27 +504,26 @@ Hacl_HMAC_compute_sha2_512(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_Hash_SHA2_sha512_update_nblocks(128U, ipad, s);
+    Hacl_Hash_SHA2_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
       rem,
       s);
   }
-  Hacl_SHA2_Scalar32_sha512_finish(s, dst1);
+  Hacl_Hash_SHA2_sha512_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_SHA2_Scalar32_sha512_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Hash_SHA2_sha512_init(s);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -555,15 +534,15 @@ Hacl_HMAC_compute_sha2_512(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_Hash_SHA2_sha512_update_nblocks(128U, opad, s);
+  Hacl_Hash_SHA2_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
     rem,
     s);
-  Hacl_SHA2_Scalar32_sha512_finish(s, dst);
+  Hacl_Hash_SHA2_sha512_finish(s, dst);
 }
 
 /**
@@ -581,66 +560,66 @@ Hacl_HMAC_compute_blake2s_32(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2s_32_blake2s((uint32_t)32U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Hash_Blake2s_hash_with_key(nkey, 32U, key, key_len, NULL, 0U);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t s[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_init(s, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Hash_Blake2s_init(s, 0U, 32U);
   uint32_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last((uint32_t)64U, wv, s0, (uint64_t)0U, (uint32_t)64U, ipad);
+    Hacl_Hash_Blake2s_update_last(64U, wv, s0, 0ULL, 64U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -653,34 +632,33 @@ Hacl_HMAC_compute_blake2s_32(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, ipad, (uint32_t)1U);
+    Hacl_Hash_Blake2s_update_multi(64U, wv, s0, 0ULL, ipad, 1U);
     uint32_t wv0[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+    Hacl_Hash_Blake2s_update_multi(n_blocks * 64U,
       wv0,
       s0,
       (uint64_t)block_len,
       full_blocks,
       n_blocks);
     uint32_t wv1[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last(rem_len,
+    Hacl_Hash_Blake2s_update_last(rem_len,
       wv1,
       s0,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+      (uint64_t)64U + (uint64_t)full_blocks_len,
       rem_len,
       rem);
   }
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst1, s0);
+  Hacl_Hash_Blake2s_finish(32U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2s_32_blake2s_init(s0, (uint32_t)0U, (uint32_t)32U);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Hash_Blake2s_init(s0, 0U, 32U);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -692,22 +670,22 @@ Hacl_HMAC_compute_blake2s_32(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint32_t wv[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, opad, (uint32_t)1U);
+  Hacl_Hash_Blake2s_update_multi(64U, wv, s0, 0ULL, opad, 1U);
   uint32_t wv0[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+  Hacl_Hash_Blake2s_update_multi(n_blocks * 64U,
     wv0,
     s0,
     (uint64_t)block_len,
     full_blocks,
     n_blocks);
   uint32_t wv1[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_last(rem_len,
+  Hacl_Hash_Blake2s_update_last(rem_len,
     wv1,
     s0,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+    (uint64_t)64U + (uint64_t)full_blocks_len,
     rem_len,
     rem);
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, s0);
+  Hacl_Hash_Blake2s_finish(32U, dst, s0);
 }
 
 /**
@@ -725,71 +703,66 @@ Hacl_HMAC_compute_blake2b_32(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2b_32_blake2b((uint32_t)64U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Hash_Blake2b_hash_with_key(nkey, 64U, key, key_len, NULL, 0U);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t s[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_init(s, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Hash_Blake2b_init(s, 0U, 64U);
   uint64_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last((uint32_t)128U,
-      wv,
-      s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      (uint32_t)128U,
-      ipad);
+    Hacl_Hash_Blake2b_update_last(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), 128U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -802,40 +775,34 @@ Hacl_HMAC_compute_blake2b_32(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
-      wv,
-      s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      ipad,
-      (uint32_t)1U);
+    Hacl_Hash_Blake2b_update_multi(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), ipad, 1U);
     uint64_t wv0[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+    Hacl_Hash_Blake2b_update_multi(n_blocks * 128U,
       wv0,
       s0,
       FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
       full_blocks,
       n_blocks);
     uint64_t wv1[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last(rem_len,
+    Hacl_Hash_Blake2b_update_last(rem_len,
       wv1,
       s0,
-      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       rem_len,
       rem);
   }
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst1, s0);
+  Hacl_Hash_Blake2b_finish(64U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2b_32_blake2b_init(s0, (uint32_t)0U, (uint32_t)64U);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Hash_Blake2b_init(s0, 0U, 64U);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -847,27 +814,22 @@ Hacl_HMAC_compute_blake2b_32(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint64_t wv[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
-    wv,
-    s0,
-    FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-    opad,
-    (uint32_t)1U);
+  Hacl_Hash_Blake2b_update_multi(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), opad, 1U);
   uint64_t wv0[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+  Hacl_Hash_Blake2b_update_multi(n_blocks * 128U,
     wv0,
     s0,
     FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
     full_blocks,
     n_blocks);
   uint64_t wv1[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_last(rem_len,
+  Hacl_Hash_Blake2b_update_last(rem_len,
     wv1,
     s0,
-    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
       FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
     rem_len,
     rem);
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, s0);
+  Hacl_Hash_Blake2b_finish(64U, dst, s0);
 }
 
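Note on the hunks above: the patch mechanically renames the internal hash entry points (Hacl_SHA2_Scalar32_* and Hacl_Streaming_SHA2_hash_* become Hacl_Hash_SHA2_*, Hacl_Blake2s_32_*/Hacl_Blake2b_32_* become Hacl_Hash_Blake2s_*/Hacl_Hash_Blake2b_*), swaps KRML_HOST_IGNORE for KRML_MAYBE_UNUSED_VAR, and drops redundant (uint32_t) casts on literals; the exported one-shot HMAC API keeps its shape. A minimal caller sketch for HMAC-SHA-256 follows, assuming the public prototype Hacl_HMAC_compute_sha2_256(dst, key, key_len, data, data_len) exactly as exercised by the HMAC-DRBG call sites later in this patch, and assuming the declaration lives in "Hacl_HMAC.h":

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "Hacl_HMAC.h"  /* assumed header carrying the one-shot HMAC prototypes */

int main(void)
{
  uint8_t key[20];
  uint8_t msg[8] = { 'H', 'i', ' ', 'T', 'h', 'e', 'r', 'e' };
  uint8_t tag[32];                     /* HMAC-SHA-256 produces a 32-byte MAC */
  memset(key, 0x0b, sizeof key);       /* any key length is accepted */

  /* Keys longer than the 64-byte block are hashed down to 32 bytes first,
     which is the key_block preparation visible in the hunks above. */
  Hacl_HMAC_compute_sha2_256(tag, key, (uint32_t)sizeof key, msg, (uint32_t)sizeof msg);

  for (size_t i = 0; i < sizeof tag; i++)
    printf("%02x", tag[i]);
  printf("\n");
  return 0;
}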
diff --git a/src/Hacl_HMAC_Blake2b_256.c b/src/Hacl_HMAC_Blake2b_256.c
index 71f75415..6197490a 100644
--- a/src/Hacl_HMAC_Blake2b_256.c
+++ b/src/Hacl_HMAC_Blake2b_256.c
@@ -26,7 +26,8 @@
 #include "Hacl_HMAC_Blake2b_256.h"
 
 #include "internal/Hacl_Krmllib.h"
-#include "internal/Hacl_Hash_Blake2.h"
+#include "internal/Hacl_Hash_Blake2b_Simd256.h"
+#include "internal/Hacl_HMAC.h"
 
 /**
 Write the HMAC-BLAKE2b MAC of a message (`data`) by using a key (`key`) into `dst`.
@@ -43,71 +44,71 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2b_256_blake2b((uint32_t)64U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Hash_Blake2b_Simd256_hash_with_key(nkey, 64U, key, key_len, NULL, 0U);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_init(s, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Hash_Blake2b_Simd256_init(s, 0U, 64U);
   Lib_IntVector_Intrinsics_vec256 *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_last((uint32_t)128U,
+    Hacl_Hash_Blake2b_Simd256_update_last(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      (uint32_t)128U,
+      FStar_UInt128_uint64_to_uint128(0ULL),
+      128U,
       ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -120,40 +121,39 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
+    Hacl_Hash_Blake2b_Simd256_update_multi(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+      FStar_UInt128_uint64_to_uint128(0ULL),
       ipad,
-      (uint32_t)1U);
+      1U);
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_multi(n_blocks * (uint32_t)128U,
+    Hacl_Hash_Blake2b_Simd256_update_multi(n_blocks * 128U,
       wv0,
       s0,
       FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
       full_blocks,
       n_blocks);
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv1[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_last(rem_len,
+    Hacl_Hash_Blake2b_Simd256_update_last(rem_len,
       wv1,
       s0,
-      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       rem_len,
       rem);
   }
-  Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst1, s0);
+  Hacl_Hash_Blake2b_Simd256_finish(64U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2b_256_blake2b_init(s0, (uint32_t)0U, (uint32_t)64U);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Hash_Blake2b_Simd256_init(s0, 0U, 64U);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -165,27 +165,27 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
+  Hacl_Hash_Blake2b_Simd256_update_multi(128U,
     wv,
     s0,
-    FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+    FStar_UInt128_uint64_to_uint128(0ULL),
     opad,
-    (uint32_t)1U);
+    1U);
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_update_multi(n_blocks * (uint32_t)128U,
+  Hacl_Hash_Blake2b_Simd256_update_multi(n_blocks * 128U,
     wv0,
     s0,
     FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
     full_blocks,
     n_blocks);
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv1[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_update_last(rem_len,
+  Hacl_Hash_Blake2b_Simd256_update_last(rem_len,
     wv1,
     s0,
-    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
       FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
     rem_len,
     rem);
-  Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst, s0);
+  Hacl_Hash_Blake2b_Simd256_finish(64U, dst, s0);
 }
 
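The same key schedule recurs in every compute_* function touched by this patch: the key is zero-padded (or, when longer than a block, hashed) into key_block, which is then XORed into a 0x36-filled ipad and a 0x5c-filled opad. A standalone sketch of that step for keys that already fit in one block; hmac_pads is a hypothetical helper name, not part of the library:

#include <stdint.h>
#include <string.h>

/* Derive the HMAC inner/outer pads from a short key (key_len <= block_len).
   Longer keys are first reduced with the underlying hash, as done by the
   Hacl_Hash_*_hash_with_key / Hacl_Hash_SHA2_hash_* calls in the hunks above. */
static void hmac_pads(uint8_t *ipad, uint8_t *opad,
                      const uint8_t *key, uint32_t key_len, uint32_t block_len)
{
  memset(ipad, 0x36, block_len);
  memset(opad, 0x5c, block_len);
  for (uint32_t i = 0U; i < key_len; i++)
  {
    ipad[i] = (uint8_t)(ipad[i] ^ key[i]);   /* 0x36 ^ key_block[i] */
    opad[i] = (uint8_t)(opad[i] ^ key[i]);   /* 0x5c ^ key_block[i] */
  }
  /* Positions past key_len keep 0x36 / 0x5c, i.e. the XOR with the zero padding. */
}

The generated code inlines this per algorithm (block_len is 64 for SHA-256 and BLAKE2s, 128 for SHA-384/512 and BLAKE2b), which is why the loop appears verbatim in every function of these files.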
diff --git a/src/Hacl_HMAC_Blake2s_128.c b/src/Hacl_HMAC_Blake2s_128.c
index bce00309..0741bffb 100644
--- a/src/Hacl_HMAC_Blake2s_128.c
+++ b/src/Hacl_HMAC_Blake2s_128.c
@@ -25,7 +25,8 @@
 
 #include "Hacl_HMAC_Blake2s_128.h"
 
-#include "internal/Hacl_Hash_Blake2.h"
+#include "internal/Hacl_Hash_Blake2s_Simd128.h"
+#include "internal/Hacl_HMAC.h"
 
 /**
 Write the HMAC-BLAKE2s MAC of a message (`data`) by using a key (`key`) into `dst`.
@@ -42,66 +43,66 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t key_block[l];
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2s_128_blake2s((uint32_t)32U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Hash_Blake2s_Simd128_hash_with_key(nkey, 32U, key, key_len, NULL, 0U);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t ipad[l];
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t opad[l];
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 s[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_init(s, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Hash_Blake2s_Simd128_init(s, 0U, 32U);
   Lib_IntVector_Intrinsics_vec128 *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_last((uint32_t)64U, wv, s0, (uint64_t)0U, (uint32_t)64U, ipad);
+    Hacl_Hash_Blake2s_Simd128_update_last(64U, wv, s0, 0ULL, 64U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -114,34 +115,33 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, ipad, (uint32_t)1U);
+    Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, s0, 0ULL, ipad, 1U);
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_multi(n_blocks * (uint32_t)64U,
+    Hacl_Hash_Blake2s_Simd128_update_multi(n_blocks * 64U,
       wv0,
       s0,
       (uint64_t)block_len,
       full_blocks,
       n_blocks);
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv1[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_last(rem_len,
+    Hacl_Hash_Blake2s_Simd128_update_last(rem_len,
       wv1,
       s0,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+      (uint64_t)64U + (uint64_t)full_blocks_len,
       rem_len,
       rem);
   }
-  Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst1, s0);
+  Hacl_Hash_Blake2s_Simd128_finish(32U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2s_128_blake2s_init(s0, (uint32_t)0U, (uint32_t)32U);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Hash_Blake2s_Simd128_init(s0, 0U, 32U);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -153,21 +153,21 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, opad, (uint32_t)1U);
+  Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, s0, 0ULL, opad, 1U);
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_update_multi(n_blocks * (uint32_t)64U,
+  Hacl_Hash_Blake2s_Simd128_update_multi(n_blocks * 64U,
     wv0,
     s0,
     (uint64_t)block_len,
     full_blocks,
     n_blocks);
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv1[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_update_last(rem_len,
+  Hacl_Hash_Blake2s_Simd128_update_last(rem_len,
     wv1,
     s0,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+    (uint64_t)64U + (uint64_t)full_blocks_len,
     rem_len,
     rem);
-  Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst, s0);
+  Hacl_Hash_Blake2s_Simd128_finish(32U, dst, s0);
 }
 
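Every compute_* function also shares the block-splitting step stored in scrut: when data_len is a nonzero multiple of the block size, one full block is held back so that update_last always receives a nonempty final chunk, and the length passed to update_last is block_len + full_blocks_len + rem_len because the ipad block has already been absorbed. A small sketch of that arithmetic, with split_blocks as a hypothetical stand-in for the K___uint32_t_uint32_t computation:

#include <stdint.h>
#include <assert.h>

typedef struct { uint32_t n_blocks; uint32_t rem_len; } block_split;

/* Mirror the scrut computation used throughout this patch: hold back one
   full block when the data is block-aligned and nonempty. */
static block_split split_blocks(uint32_t data_len, uint32_t block_len)
{
  uint32_t n_blocks = data_len / block_len;
  uint32_t rem      = data_len % block_len;
  if (n_blocks > 0U && rem == 0U)
  {
    n_blocks = n_blocks - 1U;
    rem      = data_len - n_blocks * block_len;  /* == block_len */
  }
  return (block_split){ .n_blocks = n_blocks, .rem_len = rem };
}

int main(void)
{
  /* SHA-256 / BLAKE2s use 64-byte blocks. For 100 bytes of data the inner
     hash sees 64 (ipad) + 64 (one full block) + 36 (remainder) bytes,
     matching the (uint64_t)64U + full_blocks_len + rem_len totals above. */
  block_split s = split_blocks(100U, 64U);
  assert(s.n_blocks == 1U && s.rem_len == 36U);
  /* 128 bytes of data: the final block is deliberately routed to update_last. */
  block_split t = split_blocks(128U, 64U);
  assert(t.n_blocks == 1U && t.rem_len == 64U);
  return 0;
}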
diff --git a/src/Hacl_HMAC_DRBG.c b/src/Hacl_HMAC_DRBG.c
index 0a09aaed..13de88bc 100644
--- a/src/Hacl_HMAC_DRBG.c
+++ b/src/Hacl_HMAC_DRBG.c
@@ -25,15 +25,15 @@
 
 #include "Hacl_HMAC_DRBG.h"
 
-uint32_t Hacl_HMAC_DRBG_reseed_interval = (uint32_t)1024U;
+uint32_t Hacl_HMAC_DRBG_reseed_interval = 1024U;
 
-uint32_t Hacl_HMAC_DRBG_max_output_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_output_length = 65536U;
 
-uint32_t Hacl_HMAC_DRBG_max_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_length = 65536U;
 
-uint32_t Hacl_HMAC_DRBG_max_personalization_string_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_personalization_string_length = 65536U;
 
-uint32_t Hacl_HMAC_DRBG_max_additional_input_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_additional_input_length = 65536U;
 
 /**
 Return the minimal entropy input length of the desired hash function.
@@ -46,19 +46,19 @@ uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     default:
       {
@@ -71,8 +71,8 @@ uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
 bool
 Hacl_HMAC_DRBG_uu___is_State(Spec_Hash_Definitions_hash_alg a, Hacl_HMAC_DRBG_state projectee)
 {
-  KRML_HOST_IGNORE(a);
-  KRML_HOST_IGNORE(projectee);
+  KRML_MAYBE_UNUSED_VAR(a);
+  KRML_MAYBE_UNUSED_VAR(projectee);
   return true;
 }
 
@@ -92,25 +92,25 @@ Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
         k = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
         k = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
         k = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
         k = buf;
         break;
       }
@@ -125,25 +125,25 @@ Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
         v = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
         v = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
         v = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
         v = buf;
         break;
       }
@@ -154,7 +154,7 @@ Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a)
       }
   }
   uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return ((Hacl_HMAC_DRBG_state){ .k = k, .v = v, .reseed_counter = ctr });
 }
 
@@ -200,45 +200,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)20U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)20U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 20U * sizeof (uint8_t));
+        memset(v, 1U, 20U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 21U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 20U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)21U,
+          memcpy(input0 + 21U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[20U] = (uint8_t)0U;
-        Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-        Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[20U] = 0U;
+        Hacl_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+        Hacl_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+        memcpy(k, k_, 20U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 21U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 20U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)21U,
+            memcpy(input + 21U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[20U] = (uint8_t)1U;
-          Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-          Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+          input[20U] = 1U;
+          Hacl_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+          Hacl_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+          memcpy(k, k_0, 20U * sizeof (uint8_t));
         }
         break;
       }
@@ -258,45 +256,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)32U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)32U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 32U * sizeof (uint8_t));
+        memset(v, 1U, 32U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 33U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 32U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)33U,
+          memcpy(input0 + 33U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[32U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-        Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[32U] = 0U;
+        Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+        Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+        memcpy(k, k_, 32U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 33U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 32U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)33U,
+            memcpy(input + 33U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[32U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-          Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+          input[32U] = 1U;
+          Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+          Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+          memcpy(k, k_0, 32U * sizeof (uint8_t));
         }
         break;
       }
@@ -316,45 +312,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)48U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)48U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 48U * sizeof (uint8_t));
+        memset(v, 1U, 48U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 49U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 48U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)49U,
+          memcpy(input0 + 49U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[48U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-        Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[48U] = 0U;
+        Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+        Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+        memcpy(k, k_, 48U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 49U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 48U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)49U,
+            memcpy(input + 49U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[48U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-          Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+          input[48U] = 1U;
+          Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+          Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+          memcpy(k, k_0, 48U * sizeof (uint8_t));
         }
         break;
       }
@@ -374,45 +368,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)64U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)64U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 64U * sizeof (uint8_t));
+        memset(v, 1U, 64U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 65U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 64U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)65U,
+          memcpy(input0 + 65U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[64U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-        Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[64U] = 0U;
+        Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+        Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+        memcpy(k, k_, 64U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 65U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 64U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)65U,
+            memcpy(input + 65U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[64U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-          Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+          input[64U] = 1U;
+          Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+          Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+          memcpy(k, k_0, 64U * sizeof (uint8_t));
         }
         break;
       }
@@ -460,42 +452,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 21U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 20U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)21U,
+          memcpy(input0 + 21U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[20U] = (uint8_t)0U;
-        Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-        Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[20U] = 0U;
+        Hacl_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+        Hacl_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+        memcpy(k, k_, 20U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)21U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 21U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 20U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)21U,
+            memcpy(input + 21U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[20U] = (uint8_t)1U;
-          Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-          Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+          input[20U] = 1U;
+          Hacl_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+          Hacl_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+          memcpy(k, k_0, 20U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
@@ -512,42 +504,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 33U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 32U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)33U,
+          memcpy(input0 + 33U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[32U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-        Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[32U] = 0U;
+        Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+        Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+        memcpy(k, k_, 32U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)33U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 33U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 32U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)33U,
+            memcpy(input + 33U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[32U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-          Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+          input[32U] = 1U;
+          Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+          Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+          memcpy(k, k_0, 32U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
@@ -564,42 +556,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 49U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 48U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)49U,
+          memcpy(input0 + 49U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[48U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-        Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[48U] = 0U;
+        Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+        Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+        memcpy(k, k_, 48U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)49U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 49U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 48U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)49U,
+            memcpy(input + 49U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[48U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-          Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+          input[48U] = 1U;
+          Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+          Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+          memcpy(k, k_0, 48U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
@@ -616,42 +608,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 65U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 64U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)65U,
+          memcpy(input0 + 65U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[64U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-        Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[64U] = 0U;
+        Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+        Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+        memcpy(k, k_, 64U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)65U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 65U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 64U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)65U,
+            memcpy(input + 65U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[64U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-          Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+          input[64U] = 1U;
+          Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+          Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+          memcpy(k, k_0, 64U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     default:
@@ -693,93 +685,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)21U + additional_input_len;
+          uint32_t input_len = 21U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t input0[input_len];
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 20U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)21U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[20U] = (uint8_t)0U;
-          Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-          Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[20U] = 0U;
+          Hacl_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+          Hacl_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+          memcpy(k, k_, 20U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+            uint32_t input_len0 = 21U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t input[input_len0];
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 20U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)21U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[20U] = (uint8_t)1U;
-            Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-            Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-            memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+            input[20U] = 1U;
+            Hacl_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+            Hacl_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+            memcpy(k, k_0, 20U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)20U;
+        uint32_t max = n / 20U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_legacy_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(out + i * (uint32_t)20U, v, (uint32_t)20U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha1(v, k, 20U, v, 20U);
+          memcpy(out + i * 20U, v, 20U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)20U < n)
+        if (max * 20U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)20U;
-          Hacl_HMAC_legacy_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(block, v, (n - max * (uint32_t)20U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 20U;
+          Hacl_HMAC_compute_sha1(v, k, 20U, v, 20U);
+          memcpy(block, v, (n - max * 20U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)21U + additional_input_len;
+        uint32_t input_len = 21U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 20U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[20U] = (uint8_t)0U;
-        Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-        Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[20U] = 0U;
+        Hacl_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+        Hacl_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+        memcpy(k, k_, 20U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+          uint32_t input_len0 = 21U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 20U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)21U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[20U] = (uint8_t)1U;
-          Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-          Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+          input[20U] = 1U;
+          Hacl_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+          Hacl_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+          memcpy(k, k_0, 20U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     case Spec_Hash_Definitions_SHA2_256:
@@ -791,93 +777,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)33U + additional_input_len;
+          uint32_t input_len = 33U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t input0[input_len];
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 32U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)33U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[32U] = (uint8_t)0U;
-          Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-          Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[32U] = 0U;
+          Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+          Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+          memcpy(k, k_, 32U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+            uint32_t input_len0 = 33U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t input[input_len0];
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 32U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)33U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[32U] = (uint8_t)1U;
-            Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-            Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-            memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+            input[32U] = 1U;
+            Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+            Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+            memcpy(k, k_0, 32U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)32U;
+        uint32_t max = n / 32U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(out + i * (uint32_t)32U, v, (uint32_t)32U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+          memcpy(out + i * 32U, v, 32U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)32U < n)
+        if (max * 32U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)32U;
-          Hacl_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(block, v, (n - max * (uint32_t)32U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 32U;
+          Hacl_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+          memcpy(block, v, (n - max * 32U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)33U + additional_input_len;
+        uint32_t input_len = 33U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 32U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[32U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-        Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[32U] = 0U;
+        Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+        Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+        memcpy(k, k_, 32U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+          uint32_t input_len0 = 33U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 32U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)33U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[32U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-          Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+          input[32U] = 1U;
+          Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+          Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+          memcpy(k, k_0, 32U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     case Spec_Hash_Definitions_SHA2_384:
@@ -889,93 +869,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)49U + additional_input_len;
+          uint32_t input_len = 49U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t input0[input_len];
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 48U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)49U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[48U] = (uint8_t)0U;
-          Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-          Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[48U] = 0U;
+          Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+          Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+          memcpy(k, k_, 48U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+            uint32_t input_len0 = 49U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t input[input_len0];
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 48U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)49U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[48U] = (uint8_t)1U;
-            Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-            Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-            memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+            input[48U] = 1U;
+            Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+            Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+            memcpy(k, k_0, 48U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)48U;
+        uint32_t max = n / 48U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(out + i * (uint32_t)48U, v, (uint32_t)48U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+          memcpy(out + i * 48U, v, 48U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)48U < n)
+        if (max * 48U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)48U;
-          Hacl_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(block, v, (n - max * (uint32_t)48U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 48U;
+          Hacl_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+          memcpy(block, v, (n - max * 48U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)49U + additional_input_len;
+        uint32_t input_len = 49U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 48U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[48U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-        Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[48U] = 0U;
+        Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+        Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+        memcpy(k, k_, 48U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+          uint32_t input_len0 = 49U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 48U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)49U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[48U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-          Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+          input[48U] = 1U;
+          Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+          Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+          memcpy(k, k_0, 48U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     case Spec_Hash_Definitions_SHA2_512:
@@ -987,93 +961,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)65U + additional_input_len;
+          uint32_t input_len = 65U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t input0[input_len];
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 64U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)65U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[64U] = (uint8_t)0U;
-          Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-          Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[64U] = 0U;
+          Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+          Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+          memcpy(k, k_, 64U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+            uint32_t input_len0 = 65U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t input[input_len0];
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 64U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)65U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[64U] = (uint8_t)1U;
-            Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-            Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-            memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+            input[64U] = 1U;
+            Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+            Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+            memcpy(k, k_0, 64U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)64U;
+        uint32_t max = n / 64U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(out + i * (uint32_t)64U, v, (uint32_t)64U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+          memcpy(out + i * 64U, v, 64U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)64U < n)
+        if (max * 64U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)64U;
-          Hacl_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(block, v, (n - max * (uint32_t)64U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 64U;
+          Hacl_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+          memcpy(block, v, (n - max * 64U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)65U + additional_input_len;
+        uint32_t input_len = 65U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t input0[input_len];
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 64U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[64U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-        Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[64U] = 0U;
+        Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+        Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+        memcpy(k, k_, 64U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+          uint32_t input_len0 = 65U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t input[input_len0];
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 64U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)65U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[64U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-          Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+          input[64U] = 1U;
+          Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+          Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+          memcpy(k, k_0, 64U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     default:
@@ -1086,7 +1054,7 @@ Hacl_HMAC_DRBG_generate(
 
 void Hacl_HMAC_DRBG_free(Spec_Hash_Definitions_hash_alg uu___, Hacl_HMAC_DRBG_state s)
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
diff --git a/src/Hacl_HPKE_Curve51_CP128_SHA256.c b/src/Hacl_HPKE_Curve51_CP128_SHA256.c
index 5814ae67..e8df237e 100644
--- a/src/Hacl_HPKE_Curve51_CP128_SHA256.c
+++ b/src/Hacl_HPKE_Curve51_CP128_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP128_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP128_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve51_CP128_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve51_CP128_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve51_CP128_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
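
Note on the AEAD call sites above: this patch swaps the retired Hacl_Chacha20Poly1305_128_aead_encrypt/_aead_decrypt entry points for the renamed Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt/_decrypt ones, whose argument order puts the outputs first (ciphertext and tag for sealing, plaintext for opening), places each length directly after its buffer, and moves key, nonce, and tag to the end. The snippet below is a minimal caller-side sketch inferred from those call sites, assuming the prototypes are declared in Hacl_AEAD_Chacha20Poly1305_Simd128.h as the naming convention suggests and that the target supports 128-bit vectors; the key, nonce, message, and AAD values are illustrative placeholders, not taken from this patch.

#include <stdint.h>
#include <string.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"

/* Round-trip with the new entry points; all inputs are hypothetical test data. */
static int chacha20poly1305_simd128_roundtrip(void)
{
  uint8_t key[32U] = { 0U };               /* placeholder all-zero key */
  uint8_t nonce[12U] = { 0U };             /* placeholder all-zero nonce */
  uint8_t aad[12U] = "associated!";        /* 12 bytes of AAD (trailing NUL included) */
  uint8_t plain[16U] = "0123456789abcde";  /* 15 bytes of message + NUL */
  uint8_t cipher[16U] = { 0U };
  uint8_t tag[16U] = { 0U };
  uint8_t decrypted[16U] = { 0U };

  /* New order: output, tag, input, input_len, aad, aad_len, key, nonce. */
  Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher, tag, plain, 16U, aad, 12U, key, nonce);

  /* Decryption returns 0U on success, matching the res1 checks in the hunks above;
     the tag is now passed as the last argument rather than addressed at the end of
     the ciphertext buffer. */
  uint32_t res = Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(decrypted, cipher, 16U, aad, 12U, key, nonce, tag);
  if (res == 0U && memcmp(plain, decrypted, 16U) == 0)
  {
    return 0; /* tag verified and the original message was recovered */
  }
  return 1;
}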
 
diff --git a/src/Hacl_HPKE_Curve51_CP128_SHA512.c b/src/Hacl_HPKE_Curve51_CP128_SHA512.c
index c6aff2e5..62fce254 100644
--- a/src/Hacl_HPKE_Curve51_CP128_SHA512.c
+++ b/src/Hacl_HPKE_Curve51_CP128_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP128_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP128_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve51_CP128_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve51_CP128_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve51_CP128_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP128_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
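
A caller-side sketch of the reordered AEAD entry points exercised by the hunks above, for readers tracking the rename from Hacl_Chacha20Poly1305_128_aead_* to Hacl_AEAD_Chacha20Poly1305_Simd128_*. The argument order (outputs first: ciphertext, then the 16-byte tag; then plaintext/length, AAD/length, 32-byte key, 12-byte nonce) is taken directly from the call sites in this patch, and the Simd256 variant further down follows the same order. The helper name and buffer layout below are illustrative only, not part of the library.

  #include <stdint.h>
  #include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"

  /* Hypothetical round-trip helper mirroring the sealBase/openBase call sites:
     seals `plainlen` bytes into `ct` (plainlen + 16 bytes, tag appended), then
     opens them again into `out`. */
  static uint32_t
  roundtrip(
    uint8_t *key,    /* 32 bytes  */
    uint8_t *nonce,  /* 12 bytes  */
    uint8_t *aad, uint32_t aadlen,
    uint8_t *plain, uint32_t plainlen,
    uint8_t *ct,     /* plainlen + 16 bytes */
    uint8_t *out     /* plainlen bytes      */
  )
  {
    /* Encrypt: outputs first (ciphertext, then 16-byte tag), inputs after. */
    Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(ct, ct + plainlen,
      plain, plainlen, aad, aadlen, key, nonce);
    /* Decrypt takes the tag as the last argument and returns 0U when the tag
       verifies, which is what the `res1 == 0U` checks above rely on. */
    return Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(out, ct, plainlen,
      aad, aadlen, key, nonce, ct + plainlen);
  }
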
 
diff --git a/src/Hacl_HPKE_Curve51_CP256_SHA256.c b/src/Hacl_HPKE_Curve51_CP256_SHA256.c
index 4c448589..9862d19d 100644
--- a/src/Hacl_HPKE_Curve51_CP256_SHA256.c
+++ b/src/Hacl_HPKE_Curve51_CP256_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP256_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP256_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve51_CP256_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve51_CP256_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve51_CP256_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve51_CP256_SHA512.c b/src/Hacl_HPKE_Curve51_CP256_SHA512.c
index 1ee26ea0..cafcf2c7 100644
--- a/src/Hacl_HPKE_Curve51_CP256_SHA512.c
+++ b/src/Hacl_HPKE_Curve51_CP256_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP256_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP256_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve51_CP256_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve51_CP256_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve51_CP256_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP256_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
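
The sealBase/openBase hunks above replace the old Hacl_Chacha20Poly1305_256_aead_encrypt/_aead_decrypt entry points with the renamed Hacl_AEAD_Chacha20Poly1305_Simd256 functions, which take the output buffers first and the key/nonce last. Below is a minimal sketch of the new encryption call shape using the scalar variant of the same API; the wrapper name, buffer names, and the assumption that the declarations live in Hacl_AEAD_Chacha20Poly1305.h are illustrative only and not part of the patch.

    /*
     * Illustrative use of the renamed AEAD encryption entry point, mirroring the
     * argument order visible in the hunks above: outputs (cipher, tag) first,
     * then the message, the additional data, and finally key and nonce.
     * All names here are placeholders for the sake of the example.
     */
    #include <stdint.h>
    #include "Hacl_AEAD_Chacha20Poly1305.h"   /* assumed header for the scalar variant */

    static void seal_example(uint8_t *ct /* plainlen + 16 bytes */,
                             uint8_t *plain, uint32_t plainlen,
                             uint8_t *aad, uint32_t aadlen,
                             uint8_t *key /* 32 bytes */, uint8_t *nonce /* 12 bytes */)
    {
      uint8_t *cipher = ct;            /* ciphertext written at the start of ct     */
      uint8_t *tag = ct + plainlen;    /* 16-byte Poly1305 tag appended after it    */
      Hacl_AEAD_Chacha20Poly1305_encrypt(cipher, tag, plain, plainlen,
                                         aad, aadlen, key, nonce);
    }
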
 
diff --git a/src/Hacl_HPKE_Curve51_CP32_SHA256.c b/src/Hacl_HPKE_Curve51_CP32_SHA256.c
index bc59f64a..3db57fa8 100644
--- a/src/Hacl_HPKE_Curve51_CP32_SHA256.c
+++ b/src/Hacl_HPKE_Curve51_CP32_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP32_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP32_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,43 +552,45 @@ Hacl_HPKE_Curve51_CP32_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +608,7 @@ Hacl_HPKE_Curve51_CP32_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +619,44 @@ Hacl_HPKE_Curve51_CP32_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
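Note on the AEAD call sites rewritten above: the one-shot ChaCha20-Poly1305 entry points now take the ciphertext and tag as two separate output buffers, followed by the input, the lengths, the associated data, the key, and the nonce. The minimal sketch below is not part of the patch; it only mirrors the argument order visible at the call sites in this diff. The header name and exact prototypes are assumptions, and the key/nonce/aad values are placeholders.

/*
 * Sketch of a seal/open round trip using the renamed one-shot AEAD API,
 * with the argument order taken from the call sites in this patch.
 */
#include <stdint.h>
#include <string.h>

/* Assumed header for the renamed module. */
#include "Hacl_AEAD_Chacha20Poly1305.h"

static int aead_roundtrip(void)
{
  uint8_t key[32U] = { 0U };    /* placeholder key */
  uint8_t nonce[12U] = { 0U };  /* placeholder nonce */
  uint8_t aad[4U] = { 0U };     /* placeholder associated data */
  uint8_t plain[8U] = { 1U, 2U, 3U, 4U, 5U, 6U, 7U, 8U };
  uint8_t cipher[8U];
  uint8_t tag[16U];
  uint8_t decrypted[8U];

  /* Ciphertext and tag are written to separate buffers; inputs follow. */
  Hacl_AEAD_Chacha20Poly1305_encrypt(cipher, tag, plain, 8U, aad, 4U, key, nonce);

  /* Decrypt returns 0U on success, matching the res1 == 0U checks above. */
  uint32_t res = Hacl_AEAD_Chacha20Poly1305_decrypt(decrypted, cipher, 8U, aad, 4U, key, nonce, tag);
  if (res != 0U || memcmp(decrypted, plain, 8U) != 0)
  {
    return 1;
  }
  return 0;
}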
diff --git a/src/Hacl_HPKE_Curve51_CP32_SHA512.c b/src/Hacl_HPKE_Curve51_CP32_SHA512.c
index 0314c71c..84889570 100644
--- a/src/Hacl_HPKE_Curve51_CP32_SHA512.c
+++ b/src/Hacl_HPKE_Curve51_CP32_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP32_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP32_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,43 +552,45 @@ Hacl_HPKE_Curve51_CP32_SHA512_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +608,7 @@ Hacl_HPKE_Curve51_CP32_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +619,44 @@ Hacl_HPKE_Curve51_CP32_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
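Note on the nonce/sequence handling that recurs in the sealBase/openBase bodies above: the 64-bit sequence counter is serialized big-endian into the last 8 bytes of a 12-byte block, XORed with the base nonce, and incremented only if it has not reached its maximum. The sketch below is not part of the patch; it folds the derivation and the counter bump into one plain-C helper for brevity (the generated code performs the bump after the AEAD call, and only on successful decryption), and it replaces the store64_be/KRML_MAYBE_FOR12 helpers with ordinary loops.

/*
 * Sketch of per-message nonce derivation and sequence-counter handling,
 * mirroring the pattern in the generated HPKE code above.
 */
#include <stdint.h>

static uint32_t derive_nonce_and_bump(uint8_t nonce[12U], const uint8_t base_nonce[12U], uint64_t *seq)
{
  uint8_t enc[12U] = { 0U };
  uint64_t s = *seq;
  /* Big-endian encoding of s into enc[4..11], same layout as store64_be(enc + 4U, s). */
  for (uint32_t i = 0U; i < 8U; i++)
  {
    enc[4U + i] = (uint8_t)(s >> (56U - 8U * i));
  }
  /* XOR with the base nonce, as in the KRML_MAYBE_FOR12 loop. */
  for (uint32_t i = 0U; i < 12U; i++)
  {
    nonce[i] = (uint8_t)(enc[i] ^ base_nonce[i]);
  }
  /* Refuse to wrap the counter; the generated code returns 1U in that case. */
  if (s == 18446744073709551615ULL)
  {
    return 1U;
  }
  *seq = s + 1ULL;
  return 0U;
}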
diff --git a/src/Hacl_HPKE_Curve64_CP128_SHA256.c b/src/Hacl_HPKE_Curve64_CP128_SHA256.c
index c22d5a64..742bc0da 100644
--- a/src/Hacl_HPKE_Curve64_CP128_SHA256.c
+++ b/src/Hacl_HPKE_Curve64_CP128_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP128_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP128_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve64_CP128_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve64_CP128_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve64_CP128_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve64_CP128_SHA512.c b/src/Hacl_HPKE_Curve64_CP128_SHA512.c
index d01bc1f8..915cc6ad 100644
--- a/src/Hacl_HPKE_Curve64_CP128_SHA512.c
+++ b/src/Hacl_HPKE_Curve64_CP128_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP128_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP128_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve64_CP128_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve64_CP128_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve64_CP128_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP128_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
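The hunks in this file and in the neighbouring HPKE variants swap the retired Hacl_Chacha20Poly1305_128_aead_encrypt/_aead_decrypt calls for Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt/_decrypt, whose parameters are reordered: the ciphertext and tag outputs come first, the key and nonce come last, and the 16-byte tag stays detached from the ciphertext. The sketch below only restates the calling convention visible in the diff; the header name, the helper's name, and its const-free prototypes are assumptions for illustration, not part of this patch.

#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"  /* assumed header name for the new API */

/* Seal and then open a message with the new detached-tag API, mirroring the
   argument order of the rewritten call sites: outputs first, then the input
   and its length, then the AAD, then key and nonce. */
static int
aead_roundtrip(
  uint8_t *key,        /* 32 bytes */
  uint8_t *nonce,      /* 12 bytes */
  uint8_t *aad, uint32_t aadlen,
  uint8_t *plain, uint32_t plainlen,
  uint8_t *cipher,     /* plainlen bytes */
  uint8_t *tag,        /* 16 bytes */
  uint8_t *decrypted   /* plainlen bytes */
)
{
  Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher, tag, plain, plainlen, aad, aadlen, key, nonce);
  /* decrypt returns 0U on success; the callers above treat any other value as failure */
  uint32_t res =
    Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(decrypted, cipher, plainlen, aad, aadlen, key, nonce, tag);
  return res == 0U ? 0 : -1;
}
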
diff --git a/src/Hacl_HPKE_Curve64_CP256_SHA256.c b/src/Hacl_HPKE_Curve64_CP256_SHA256.c
index 6de7db47..c8f5148a 100644
--- a/src/Hacl_HPKE_Curve64_CP256_SHA256.c
+++ b/src/Hacl_HPKE_Curve64_CP256_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP256_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP256_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve64_CP256_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve64_CP256_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve64_CP256_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
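Every setupBaseS/setupBaseR body in these files assembles the HKDF expand input by hand: a big-endian 16-bit output length, the seven ASCII bytes of "HPKE-v1" (0x48 0x50 0x4b 0x45 0x2d 0x76 0x31), a suite id, a label, and the context, at the offsets visible in the store16_be/memcpy sequences above (2, 9, 9 + suite_id_len, and so on). The helper below is a hypothetical restatement of that layout for readers tracing the offsets; it is not in the patch, its name and prototype are invented, and the matching extract-side buffers simply omit the two-byte length prefix.

#include <stdint.h>
#include <string.h>

/* Hypothetical illustration of the buffer layout fed to Hacl_HKDF_expand_*:
   [ out_len (2 bytes, big-endian) | "HPKE-v1" | suite_id | label | context ].
   Returns the number of bytes written; dst must be large enough. */
static uint32_t
hpke_labeled_info(
  uint8_t *dst, uint16_t out_len,
  const uint8_t *suite_id, uint32_t suite_id_len,
  const uint8_t *label, uint32_t label_len,
  const uint8_t *context, uint32_t context_len)
{
  static const uint8_t hpke_v1[7U] = { 0x48U, 0x50U, 0x4bU, 0x45U, 0x2dU, 0x76U, 0x31U };
  dst[0U] = (uint8_t)(out_len >> 8U);                 /* store16_be(dst, out_len) */
  dst[1U] = (uint8_t)out_len;
  memcpy(dst + 2U, hpke_v1, 7U);                      /* "HPKE-v1" at offset 2 */
  memcpy(dst + 9U, suite_id, suite_id_len);           /* suite id at offset 9 */
  memcpy(dst + 9U + suite_id_len, label, label_len);  /* e.g. "base_nonce" at 19 when the suite id is 10 bytes */
  memcpy(dst + 9U + suite_id_len + label_len, context, context_len);
  return 9U + suite_id_len + label_len + context_len;
}
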
diff --git a/src/Hacl_HPKE_Curve64_CP256_SHA512.c b/src/Hacl_HPKE_Curve64_CP256_SHA512.c
index 146b64eb..2b581c0a 100644
--- a/src/Hacl_HPKE_Curve64_CP256_SHA512.c
+++ b/src/Hacl_HPKE_Curve64_CP256_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP256_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
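
Note on the setup hunks above: every HKDF call in the HPKE setup code first assembles a labeled buffer of the form I2OSP(L, 2) || "HPKE-v1" || suite_id || label || context (the bytes 0x48 0x50 0x4b 0x45 0x2d 0x76 0x31 are the ASCII string "HPKE-v1", per RFC 9180); the patch only strips the redundant integer casts around those writes. A minimal, hypothetical helper that produces the same layout for the expand case, for orientation only (the name and signature are illustrative and not part of the library):

#include <stdint.h>
#include <string.h>

static void
hpke_labeled_info(
  uint8_t *out,
  uint16_t out_len,
  uint8_t *suite_id, uint32_t suite_id_len,
  uint8_t *label, uint32_t label_len,
  uint8_t *context, uint32_t context_len
)
{
  /* Two-byte big-endian output length, written by store16_be in the generated code. */
  out[0U] = (uint8_t)(out_len >> 8U);
  out[1U] = (uint8_t)out_len;
  /* Protocol label "HPKE-v1". */
  memcpy(out + 2U, "HPKE-v1", 7U);
  memcpy(out + 9U, suite_id, suite_id_len * sizeof (uint8_t));
  memcpy(out + 9U + suite_id_len, label, label_len * sizeof (uint8_t));
  memcpy(out + 9U + suite_id_len + label_len, context, context_len * sizeof (uint8_t));
}

The extract calls in the same hunks use the variant without the two-byte length prefix, i.e. "HPKE-v1" starts at offset 0.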
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP256_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve64_CP256_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
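
The seal path above now targets the renamed AEAD front end: the outputs come first (the ciphertext, then the 16-byte tag appended at o_ct + plainlen), followed by the message, the additional data, and finally the key and nonce. A short caller sketch under that assumption (the wrapper name and the header location are illustrative, not part of the patch):

#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"

static void
seal_sketch(
  uint8_t *o_ct,        /* plainlen + 16 bytes of output */
  uint8_t *plain, uint32_t plainlen,
  uint8_t *aad, uint32_t aadlen,
  uint8_t *key,         /* 32 bytes */
  uint8_t *nonce        /* 12 bytes */
)
{
  uint8_t *cipher = o_ct;
  uint8_t *tag = o_ct + plainlen;   /* Poly1305 tag stored right after the ciphertext */
  Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher, tag, plain, plainlen, aad, aadlen, key, nonce);
}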
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve64_CP256_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve64_CP256_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP256_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
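
Both the seal and open paths above derive the per-message IV the same way: the 64-bit sequence counter is serialized big-endian into the last 8 bytes of a zeroed 12-byte block and XORed into the base nonce. A standalone sketch of that computation (the helper name is illustrative):

#include <stdint.h>

static void
hpke_nonce_sketch(uint8_t *nonce, uint8_t *base_nonce, uint64_t seq)
{
  uint8_t enc[12U] = { 0U };
  /* store64_be(enc + 4U, seq) in the generated code */
  for (uint32_t i = 0U; i < 8U; i++)
  {
    enc[4U + i] = (uint8_t)(seq >> (56U - 8U * i));
  }
  for (uint32_t i = 0U; i < 12U; i++)
  {
    nonce[i] = (uint8_t)((uint32_t)enc[i] ^ (uint32_t)base_nonce[i]);
  }
}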
 
diff --git a/src/Hacl_HPKE_Curve64_CP32_SHA256.c b/src/Hacl_HPKE_Curve64_CP32_SHA256.c
index c7f168bb..7389f1f2 100644
--- a/src/Hacl_HPKE_Curve64_CP32_SHA256.c
+++ b/src/Hacl_HPKE_Curve64_CP32_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
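
The loop over FStar_UInt8_eq_mask in the hunk above is a constant-time check that the Curve25519 shared point is not all zeroes: the mask is 0xFF when two bytes are equal and 0x00 otherwise, so the accumulator stays at 0xFF only for an all-zero result. An illustrative standalone version (the extern declaration stands in for the krmllib-provided primitive; names are not part of the patch):

#include <stdint.h>

/* Assumed to be supplied by the krml support library. */
extern uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b);

static uint32_t
is_all_zero_sketch(uint8_t *dh)   /* 32-byte shared secret */
{
  uint8_t zeros[32U] = { 0U };
  uint8_t acc = 255U;
  for (uint32_t i = 0U; i < 32U; i++)
  {
    acc = (uint8_t)((uint32_t)FStar_UInt8_eq_mask(dh[i], zeros[i]) & (uint32_t)acc);
  }
  if (acc == 255U)
  {
    return 1U;   /* degenerate point: caller rejects */
  }
  return 0U;
}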
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP32_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP32_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,43 +552,45 @@ Hacl_HPKE_Curve64_CP32_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +608,7 @@ Hacl_HPKE_Curve64_CP32_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +619,44 @@ Hacl_HPKE_Curve64_CP32_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_Curve64_CP32_SHA512.c b/src/Hacl_HPKE_Curve64_CP32_SHA512.c
index 39e1a267..2ec61714 100644
--- a/src/Hacl_HPKE_Curve64_CP32_SHA512.c
+++ b/src/Hacl_HPKE_Curve64_CP32_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP32_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp0[len0];
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp0[len0];
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp1[len1];
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp2[len2];
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp3[len3];
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp4[len4];
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP32_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,43 +552,45 @@ Hacl_HPKE_Curve64_CP32_SHA512_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +608,7 @@ Hacl_HPKE_Curve64_CP32_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +619,44 @@ Hacl_HPKE_Curve64_CP32_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_P256_CP128_SHA256.c b/src/Hacl_HPKE_P256_CP128_SHA256.c
index 5320f1f5..54a217fd 100644
--- a/src/Hacl_HPKE_P256_CP128_SHA256.c
+++ b/src/Hacl_HPKE_P256_CP128_SHA256.c
@@ -38,267 +38,239 @@ Hacl_HPKE_P256_CP128_SHA256_setupBaseS(
 )
 {
   uint8_t o_shared[32U] = { 0U };
-  uint8_t *o_pkE1 = o_pkE + (uint32_t)1U;
+  uint8_t *o_pkE1 = o_pkE + 1U;
   bool res0 = Hacl_Impl_P256_DH_ecp256dh_i(o_pkE1, skE);
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint32_t res3;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    o_pkE[0U] = (uint8_t)4U;
+    o_pkE[0U] = 4U;
     uint8_t o_dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkR, skE);
-    memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(o_dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res2;
     if (res)
     {
-      res2 = (uint32_t)0U;
+      res2 = 0U;
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
     uint8_t o_kemcontext[130U] = { 0U };
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U;
-      uint8_t *o_pkR = o_pkRm + (uint32_t)1U;
-      memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t));
-      o_pkRm[0U] = (uint8_t)4U;
+      memcpy(o_kemcontext, o_pkE, 65U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 65U;
+      uint8_t *o_pkR = o_pkRm + 1U;
+      memcpy(o_pkR, pkR, 64U * sizeof (uint8_t));
+      o_pkRm[0U] = 4U;
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____0 = suite_id_kem;
-      uu____0[0U] = (uint8_t)0x4bU;
-      uu____0[1U] = (uint8_t)0x45U;
-      uu____0[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-      uu____1[0U] = (uint8_t)0U;
-      uu____1[1U] = (uint8_t)16U;
+      uu____0[0U] = 0x4bU;
+      uu____0[1U] = 0x45U;
+      uu____0[2U] = 0x4dU;
+      uint8_t *uu____1 = suite_id_kem + 3U;
+      uu____1[0U] = 0U;
+      uu____1[1U] = 16U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp1[len0];
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____2 = tmp1;
-      uu____2[0U] = (uint8_t)0x48U;
-      uu____2[1U] = (uint8_t)0x50U;
-      uu____2[2U] = (uint8_t)0x4bU;
-      uu____2[3U] = (uint8_t)0x45U;
-      uu____2[4U] = (uint8_t)0x2dU;
-      uu____2[5U] = (uint8_t)0x76U;
-      uu____2[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+      uu____2[0U] = 0x48U;
+      uu____2[1U] = 0x50U;
+      uu____2[2U] = 0x4bU;
+      uu____2[3U] = 0x45U;
+      uu____2[4U] = 0x2dU;
+      uu____2[5U] = 0x76U;
+      uu____2[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp1 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)157U;
+      uint32_t len = 157U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____3 = tmp + (uint32_t)2U;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)130U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res3 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____3 = tmp + 2U;
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 130U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res3 = 0U;
     }
     else
     {
-      res3 = (uint32_t)1U;
+      res3 = 1U;
     }
   }
   else
   {
-    res3 = (uint32_t)1U;
+    res3 = 1U;
   }
-  if (res3 == (uint32_t)0U)
+  if (res3 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____4 = suite_id;
-    uu____4[0U] = (uint8_t)0x48U;
-    uu____4[1U] = (uint8_t)0x50U;
-    uu____4[2U] = (uint8_t)0x4bU;
-    uu____4[3U] = (uint8_t)0x45U;
-    uint8_t *uu____5 = suite_id + (uint32_t)4U;
-    uu____5[0U] = (uint8_t)0U;
-    uu____5[1U] = (uint8_t)16U;
-    uint8_t *uu____6 = suite_id + (uint32_t)6U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)1U;
-    uint8_t *uu____7 = suite_id + (uint32_t)8U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
+    uu____4[0U] = 0x48U;
+    uu____4[1U] = 0x50U;
+    uu____4[2U] = 0x4bU;
+    uu____4[3U] = 0x45U;
+    uint8_t *uu____5 = suite_id + 4U;
+    uu____5[0U] = 0U;
+    uu____5[1U] = 16U;
+    uint8_t *uu____6 = suite_id + 6U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 1U;
+    uint8_t *uu____7 = suite_id + 8U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____8 = tmp0;
-    uu____8[0U] = (uint8_t)0x48U;
-    uu____8[1U] = (uint8_t)0x50U;
-    uu____8[2U] = (uint8_t)0x4bU;
-    uu____8[3U] = (uint8_t)0x45U;
-    uu____8[4U] = (uint8_t)0x2dU;
-    uu____8[5U] = (uint8_t)0x76U;
-    uu____8[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____8[0U] = 0x48U;
+    uu____8[1U] = 0x50U;
+    uu____8[2U] = 0x4bU;
+    uu____8[3U] = 0x45U;
+    uu____8[4U] = 0x2dU;
+    uu____8[5U] = 0x76U;
+    uu____8[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp1;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp2;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____11 = tmp3 + (uint32_t)2U;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____11 = tmp3 + 2U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp4 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____12 = tmp4 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____13 = tmp + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____13 = tmp + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res3;
   }
   return res3;
@@ -318,279 +290,252 @@ Hacl_HPKE_P256_CP128_SHA256_setupBaseR(
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    uint8_t *pkE = enc + (uint32_t)1U;
+    uint8_t *pkE = enc + 1U;
     uint8_t dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkE, skR);
-    memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res11;
     if (res)
     {
-      res11 = (uint32_t)0U;
+      res11 = 0U;
     }
     else
     {
-      res11 = (uint32_t)1U;
+      res11 = 1U;
     }
     uint32_t res20;
     uint8_t kemcontext[130U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)65U;
-      uint8_t *pkR1 = pkRm + (uint32_t)1U;
+      uint8_t *pkRm = kemcontext + 65U;
+      uint8_t *pkR1 = pkRm + 1U;
       bool res3 = Hacl_Impl_P256_DH_ecp256dh_i(pkR1, skR);
       uint32_t res2;
       if (res3)
       {
-        res2 = (uint32_t)0U;
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
-      if (res2 == (uint32_t)0U)
+      if (res2 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t));
-        pkRm[0U] = (uint8_t)4U;
+        memcpy(kemcontext, enc, 65U * sizeof (uint8_t));
+        pkRm[0U] = 4U;
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____0 = suite_id_kem;
-        uu____0[0U] = (uint8_t)0x4bU;
-        uu____0[1U] = (uint8_t)0x45U;
-        uu____0[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-        uu____1[0U] = (uint8_t)0U;
-        uu____1[1U] = (uint8_t)16U;
+        uu____0[0U] = 0x4bU;
+        uu____0[1U] = 0x45U;
+        uu____0[2U] = 0x4dU;
+        uint8_t *uu____1 = suite_id_kem + 3U;
+        uu____1[0U] = 0U;
+        uu____1[1U] = 16U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp1[len0];
         memset(tmp1, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____2 = tmp1;
-        uu____2[0U] = (uint8_t)0x48U;
-        uu____2[1U] = (uint8_t)0x50U;
-        uu____2[2U] = (uint8_t)0x4bU;
-        uu____2[3U] = (uint8_t)0x45U;
-        uu____2[4U] = (uint8_t)0x2dU;
-        uu____2[5U] = (uint8_t)0x76U;
-        uu____2[6U] = (uint8_t)0x31U;
-        memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+        uu____2[0U] = 0x48U;
+        uu____2[1U] = 0x50U;
+        uu____2[2U] = 0x4bU;
+        uu____2[3U] = 0x45U;
+        uu____2[4U] = 0x2dU;
+        uu____2[5U] = 0x76U;
+        uu____2[6U] = 0x31U;
+        memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp1 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)157U;
+        uint32_t len = 157U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____3 = tmp + (uint32_t)2U;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)130U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res20 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____3 = tmp + 2U;
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 130U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res20 = 0U;
       }
       else
       {
-        res20 = (uint32_t)1U;
+        res20 = 1U;
       }
     }
     else
     {
-      res20 = (uint32_t)1U;
+      res20 = 1U;
     }
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____4 = suite_id;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uint8_t *uu____5 = suite_id + (uint32_t)4U;
-      uu____5[0U] = (uint8_t)0U;
-      uu____5[1U] = (uint8_t)16U;
-      uint8_t *uu____6 = suite_id + (uint32_t)6U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)1U;
-      uint8_t *uu____7 = suite_id + (uint32_t)8U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uint8_t *uu____5 = suite_id + 4U;
+      uu____5[0U] = 0U;
+      uu____5[1U] = 16U;
+      uint8_t *uu____6 = suite_id + 6U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 1U;
+      uint8_t *uu____7 = suite_id + 8U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp1[len0];
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____8 = tmp1;
-      uu____8[0U] = (uint8_t)0x48U;
-      uu____8[1U] = (uint8_t)0x50U;
-      uu____8[2U] = (uint8_t)0x4bU;
-      uu____8[3U] = (uint8_t)0x45U;
-      uu____8[4U] = (uint8_t)0x2dU;
-      uu____8[5U] = (uint8_t)0x76U;
-      uu____8[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp1, len0);
+      uu____8[0U] = 0x48U;
+      uu____8[1U] = 0x50U;
+      uu____8[2U] = 0x4bU;
+      uu____8[3U] = 0x45U;
+      uu____8[4U] = 0x2dU;
+      uu____8[5U] = 0x76U;
+      uu____8[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp1 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp1, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp2[len1];
       memset(tmp2, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp2;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp2, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp2 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp2, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp3[len2];
       memset(tmp3, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp3;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp3, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp3 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp3 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp3, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp4[len3];
       memset(tmp4, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____11 = tmp4 + (uint32_t)2U;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp4,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____11 = tmp4 + 2U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp4, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp5[len4];
       memset(tmp5, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp5, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp5 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp5 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp5, len4, (uint32_t)32U);
+      store16_be(tmp5, (uint16_t)32U);
+      uint8_t *uu____12 = tmp5 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp5 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp5 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp5 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp5, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____13 = tmp + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____13 = tmp + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -609,7 +554,7 @@ Hacl_HPKE_P256_CP128_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -620,43 +565,45 @@ Hacl_HPKE_P256_CP128_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -674,7 +621,7 @@ Hacl_HPKE_P256_CP128_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -685,42 +632,44 @@ Hacl_HPKE_P256_CP128_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/Hacl_HPKE_P256_CP256_SHA256.c b/src/Hacl_HPKE_P256_CP256_SHA256.c
index 3603cb42..bccf781a 100644
--- a/src/Hacl_HPKE_P256_CP256_SHA256.c
+++ b/src/Hacl_HPKE_P256_CP256_SHA256.c
@@ -38,267 +38,239 @@ Hacl_HPKE_P256_CP256_SHA256_setupBaseS(
 )
 {
   uint8_t o_shared[32U] = { 0U };
-  uint8_t *o_pkE1 = o_pkE + (uint32_t)1U;
+  uint8_t *o_pkE1 = o_pkE + 1U;
   bool res0 = Hacl_Impl_P256_DH_ecp256dh_i(o_pkE1, skE);
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint32_t res3;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    o_pkE[0U] = (uint8_t)4U;
+    o_pkE[0U] = 4U;
     uint8_t o_dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkR, skE);
-    memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(o_dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res2;
     if (res)
     {
-      res2 = (uint32_t)0U;
+      res2 = 0U;
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
     uint8_t o_kemcontext[130U] = { 0U };
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U;
-      uint8_t *o_pkR = o_pkRm + (uint32_t)1U;
-      memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t));
-      o_pkRm[0U] = (uint8_t)4U;
+      memcpy(o_kemcontext, o_pkE, 65U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 65U;
+      uint8_t *o_pkR = o_pkRm + 1U;
+      memcpy(o_pkR, pkR, 64U * sizeof (uint8_t));
+      o_pkRm[0U] = 4U;
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____0 = suite_id_kem;
-      uu____0[0U] = (uint8_t)0x4bU;
-      uu____0[1U] = (uint8_t)0x45U;
-      uu____0[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-      uu____1[0U] = (uint8_t)0U;
-      uu____1[1U] = (uint8_t)16U;
+      uu____0[0U] = 0x4bU;
+      uu____0[1U] = 0x45U;
+      uu____0[2U] = 0x4dU;
+      uint8_t *uu____1 = suite_id_kem + 3U;
+      uu____1[0U] = 0U;
+      uu____1[1U] = 16U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp1[len0];
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____2 = tmp1;
-      uu____2[0U] = (uint8_t)0x48U;
-      uu____2[1U] = (uint8_t)0x50U;
-      uu____2[2U] = (uint8_t)0x4bU;
-      uu____2[3U] = (uint8_t)0x45U;
-      uu____2[4U] = (uint8_t)0x2dU;
-      uu____2[5U] = (uint8_t)0x76U;
-      uu____2[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+      uu____2[0U] = 0x48U;
+      uu____2[1U] = 0x50U;
+      uu____2[2U] = 0x4bU;
+      uu____2[3U] = 0x45U;
+      uu____2[4U] = 0x2dU;
+      uu____2[5U] = 0x76U;
+      uu____2[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp1 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)157U;
+      uint32_t len = 157U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____3 = tmp + (uint32_t)2U;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)130U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res3 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____3 = tmp + 2U;
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 130U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res3 = 0U;
     }
     else
     {
-      res3 = (uint32_t)1U;
+      res3 = 1U;
     }
   }
   else
   {
-    res3 = (uint32_t)1U;
+    res3 = 1U;
   }
-  if (res3 == (uint32_t)0U)
+  if (res3 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____4 = suite_id;
-    uu____4[0U] = (uint8_t)0x48U;
-    uu____4[1U] = (uint8_t)0x50U;
-    uu____4[2U] = (uint8_t)0x4bU;
-    uu____4[3U] = (uint8_t)0x45U;
-    uint8_t *uu____5 = suite_id + (uint32_t)4U;
-    uu____5[0U] = (uint8_t)0U;
-    uu____5[1U] = (uint8_t)16U;
-    uint8_t *uu____6 = suite_id + (uint32_t)6U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)1U;
-    uint8_t *uu____7 = suite_id + (uint32_t)8U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
+    uu____4[0U] = 0x48U;
+    uu____4[1U] = 0x50U;
+    uu____4[2U] = 0x4bU;
+    uu____4[3U] = 0x45U;
+    uint8_t *uu____5 = suite_id + 4U;
+    uu____5[0U] = 0U;
+    uu____5[1U] = 16U;
+    uint8_t *uu____6 = suite_id + 6U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 1U;
+    uint8_t *uu____7 = suite_id + 8U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____8 = tmp0;
-    uu____8[0U] = (uint8_t)0x48U;
-    uu____8[1U] = (uint8_t)0x50U;
-    uu____8[2U] = (uint8_t)0x4bU;
-    uu____8[3U] = (uint8_t)0x45U;
-    uu____8[4U] = (uint8_t)0x2dU;
-    uu____8[5U] = (uint8_t)0x76U;
-    uu____8[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____8[0U] = 0x48U;
+    uu____8[1U] = 0x50U;
+    uu____8[2U] = 0x4bU;
+    uu____8[3U] = 0x45U;
+    uu____8[4U] = 0x2dU;
+    uu____8[5U] = 0x76U;
+    uu____8[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp1;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp2;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____11 = tmp3 + (uint32_t)2U;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____11 = tmp3 + 2U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp4 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____12 = tmp4 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____13 = tmp + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____13 = tmp + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res3;
   }
   return res3;
@@ -318,279 +290,252 @@ Hacl_HPKE_P256_CP256_SHA256_setupBaseR(
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    uint8_t *pkE = enc + (uint32_t)1U;
+    uint8_t *pkE = enc + 1U;
     uint8_t dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkE, skR);
-    memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res11;
     if (res)
     {
-      res11 = (uint32_t)0U;
+      res11 = 0U;
     }
     else
     {
-      res11 = (uint32_t)1U;
+      res11 = 1U;
     }
     uint32_t res20;
     uint8_t kemcontext[130U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)65U;
-      uint8_t *pkR1 = pkRm + (uint32_t)1U;
+      uint8_t *pkRm = kemcontext + 65U;
+      uint8_t *pkR1 = pkRm + 1U;
       bool res3 = Hacl_Impl_P256_DH_ecp256dh_i(pkR1, skR);
       uint32_t res2;
       if (res3)
       {
-        res2 = (uint32_t)0U;
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
-      if (res2 == (uint32_t)0U)
+      if (res2 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t));
-        pkRm[0U] = (uint8_t)4U;
+        memcpy(kemcontext, enc, 65U * sizeof (uint8_t));
+        pkRm[0U] = 4U;
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____0 = suite_id_kem;
-        uu____0[0U] = (uint8_t)0x4bU;
-        uu____0[1U] = (uint8_t)0x45U;
-        uu____0[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-        uu____1[0U] = (uint8_t)0U;
-        uu____1[1U] = (uint8_t)16U;
+        uu____0[0U] = 0x4bU;
+        uu____0[1U] = 0x45U;
+        uu____0[2U] = 0x4dU;
+        uint8_t *uu____1 = suite_id_kem + 3U;
+        uu____1[0U] = 0U;
+        uu____1[1U] = 16U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp1[len0];
         memset(tmp1, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____2 = tmp1;
-        uu____2[0U] = (uint8_t)0x48U;
-        uu____2[1U] = (uint8_t)0x50U;
-        uu____2[2U] = (uint8_t)0x4bU;
-        uu____2[3U] = (uint8_t)0x45U;
-        uu____2[4U] = (uint8_t)0x2dU;
-        uu____2[5U] = (uint8_t)0x76U;
-        uu____2[6U] = (uint8_t)0x31U;
-        memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+        uu____2[0U] = 0x48U;
+        uu____2[1U] = 0x50U;
+        uu____2[2U] = 0x4bU;
+        uu____2[3U] = 0x45U;
+        uu____2[4U] = 0x2dU;
+        uu____2[5U] = 0x76U;
+        uu____2[6U] = 0x31U;
+        memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp1 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)157U;
+        uint32_t len = 157U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____3 = tmp + (uint32_t)2U;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)130U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res20 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____3 = tmp + 2U;
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 130U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res20 = 0U;
       }
       else
       {
-        res20 = (uint32_t)1U;
+        res20 = 1U;
       }
     }
     else
     {
-      res20 = (uint32_t)1U;
+      res20 = 1U;
     }
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____4 = suite_id;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uint8_t *uu____5 = suite_id + (uint32_t)4U;
-      uu____5[0U] = (uint8_t)0U;
-      uu____5[1U] = (uint8_t)16U;
-      uint8_t *uu____6 = suite_id + (uint32_t)6U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)1U;
-      uint8_t *uu____7 = suite_id + (uint32_t)8U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uint8_t *uu____5 = suite_id + 4U;
+      uu____5[0U] = 0U;
+      uu____5[1U] = 16U;
+      uint8_t *uu____6 = suite_id + 6U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 1U;
+      uint8_t *uu____7 = suite_id + 8U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp1[len0];
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____8 = tmp1;
-      uu____8[0U] = (uint8_t)0x48U;
-      uu____8[1U] = (uint8_t)0x50U;
-      uu____8[2U] = (uint8_t)0x4bU;
-      uu____8[3U] = (uint8_t)0x45U;
-      uu____8[4U] = (uint8_t)0x2dU;
-      uu____8[5U] = (uint8_t)0x76U;
-      uu____8[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp1, len0);
+      uu____8[0U] = 0x48U;
+      uu____8[1U] = 0x50U;
+      uu____8[2U] = 0x4bU;
+      uu____8[3U] = 0x45U;
+      uu____8[4U] = 0x2dU;
+      uu____8[5U] = 0x76U;
+      uu____8[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp1 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp1, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp2[len1];
       memset(tmp2, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp2;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp2, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp2 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp2, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp3[len2];
       memset(tmp3, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp3;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp3, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp3 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp3 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp3, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp4[len3];
       memset(tmp4, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____11 = tmp4 + (uint32_t)2U;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp4,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____11 = tmp4 + 2U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp4, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp5[len4];
       memset(tmp5, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp5, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp5 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp5 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp5, len4, (uint32_t)32U);
+      store16_be(tmp5, (uint16_t)32U);
+      uint8_t *uu____12 = tmp5 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp5 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp5 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp5 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp5, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____13 = tmp + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____13 = tmp + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -609,7 +554,7 @@ Hacl_HPKE_P256_CP256_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -620,43 +565,45 @@ Hacl_HPKE_P256_CP256_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -674,7 +621,7 @@ Hacl_HPKE_P256_CP256_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -685,42 +632,44 @@ Hacl_HPKE_P256_CP256_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
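Note on the AEAD call-site rewrite above: the old Hacl_Chacha20Poly1305_256_aead_encrypt / _aead_decrypt entry points took (key, nonce, aadlen, aad, len, ...) and wrote the 16-byte tag directly after the ciphertext, whereas the renamed Hacl_AEAD_Chacha20Poly1305_Simd256 functions take the ciphertext and the tag as two explicit buffers and move the key and nonce to the end of the argument list; sealBase/openBase simply split o_ct / ct into those two views. A minimal round-trip sketch against the new header, assuming the signatures match the calls in the hunks above (buffer contents and sizes are illustrative only, and the Simd256 variant needs a 256-bit-vector-capable build):

  #include <stdint.h>
  #include <string.h>
  #include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"

  int chacha20poly1305_roundtrip(void)
  {
    uint8_t key[32U] = { 0U };            /* AEAD key */
    uint8_t nonce[12U] = { 0U };          /* 96-bit nonce */
    uint8_t aad[4U] = { 1U, 2U, 3U, 4U }; /* associated data */
    uint8_t msg[8U] = { 0U };             /* plaintext */
    uint8_t ct[8U];                       /* ciphertext, same length as msg */
    uint8_t tag[16U];                     /* detached Poly1305 tag */
    uint8_t pt[8U];                       /* decrypted output */

    /* ciphertext and tag are now two separate output buffers */
    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(ct, tag, msg, 8U, aad, 4U, key, nonce);

    /* decrypt returns 0U on success, nonzero if the tag does not verify */
    uint32_t res = Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(pt, ct, 8U, aad, 4U, key, nonce, tag);
    return (res == 0U && memcmp(pt, msg, 8U) == 0) ? 0 : 1;
  }
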
diff --git a/src/Hacl_HPKE_P256_CP32_SHA256.c b/src/Hacl_HPKE_P256_CP32_SHA256.c
index 5297dd2c..fc132641 100644
--- a/src/Hacl_HPKE_P256_CP32_SHA256.c
+++ b/src/Hacl_HPKE_P256_CP32_SHA256.c
@@ -38,267 +38,239 @@ Hacl_HPKE_P256_CP32_SHA256_setupBaseS(
 )
 {
   uint8_t o_shared[32U] = { 0U };
-  uint8_t *o_pkE1 = o_pkE + (uint32_t)1U;
+  uint8_t *o_pkE1 = o_pkE + 1U;
   bool res0 = Hacl_Impl_P256_DH_ecp256dh_i(o_pkE1, skE);
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint32_t res3;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    o_pkE[0U] = (uint8_t)4U;
+    o_pkE[0U] = 4U;
     uint8_t o_dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkR, skE);
-    memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(o_dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res2;
     if (res)
     {
-      res2 = (uint32_t)0U;
+      res2 = 0U;
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
     uint8_t o_kemcontext[130U] = { 0U };
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U;
-      uint8_t *o_pkR = o_pkRm + (uint32_t)1U;
-      memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t));
-      o_pkRm[0U] = (uint8_t)4U;
+      memcpy(o_kemcontext, o_pkE, 65U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 65U;
+      uint8_t *o_pkR = o_pkRm + 1U;
+      memcpy(o_pkR, pkR, 64U * sizeof (uint8_t));
+      o_pkRm[0U] = 4U;
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____0 = suite_id_kem;
-      uu____0[0U] = (uint8_t)0x4bU;
-      uu____0[1U] = (uint8_t)0x45U;
-      uu____0[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-      uu____1[0U] = (uint8_t)0U;
-      uu____1[1U] = (uint8_t)16U;
+      uu____0[0U] = 0x4bU;
+      uu____0[1U] = 0x45U;
+      uu____0[2U] = 0x4dU;
+      uint8_t *uu____1 = suite_id_kem + 3U;
+      uu____1[0U] = 0U;
+      uu____1[1U] = 16U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp1[len0];
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____2 = tmp1;
-      uu____2[0U] = (uint8_t)0x48U;
-      uu____2[1U] = (uint8_t)0x50U;
-      uu____2[2U] = (uint8_t)0x4bU;
-      uu____2[3U] = (uint8_t)0x45U;
-      uu____2[4U] = (uint8_t)0x2dU;
-      uu____2[5U] = (uint8_t)0x76U;
-      uu____2[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+      uu____2[0U] = 0x48U;
+      uu____2[1U] = 0x50U;
+      uu____2[2U] = 0x4bU;
+      uu____2[3U] = 0x45U;
+      uu____2[4U] = 0x2dU;
+      uu____2[5U] = 0x76U;
+      uu____2[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp1 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)157U;
+      uint32_t len = 157U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____3 = tmp + (uint32_t)2U;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)130U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res3 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____3 = tmp + 2U;
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 130U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res3 = 0U;
     }
     else
     {
-      res3 = (uint32_t)1U;
+      res3 = 1U;
     }
   }
   else
   {
-    res3 = (uint32_t)1U;
+    res3 = 1U;
   }
-  if (res3 == (uint32_t)0U)
+  if (res3 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____4 = suite_id;
-    uu____4[0U] = (uint8_t)0x48U;
-    uu____4[1U] = (uint8_t)0x50U;
-    uu____4[2U] = (uint8_t)0x4bU;
-    uu____4[3U] = (uint8_t)0x45U;
-    uint8_t *uu____5 = suite_id + (uint32_t)4U;
-    uu____5[0U] = (uint8_t)0U;
-    uu____5[1U] = (uint8_t)16U;
-    uint8_t *uu____6 = suite_id + (uint32_t)6U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)1U;
-    uint8_t *uu____7 = suite_id + (uint32_t)8U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
+    uu____4[0U] = 0x48U;
+    uu____4[1U] = 0x50U;
+    uu____4[2U] = 0x4bU;
+    uu____4[3U] = 0x45U;
+    uint8_t *uu____5 = suite_id + 4U;
+    uu____5[0U] = 0U;
+    uu____5[1U] = 16U;
+    uint8_t *uu____6 = suite_id + 6U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 1U;
+    uint8_t *uu____7 = suite_id + 8U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t tmp0[len0];
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____8 = tmp0;
-    uu____8[0U] = (uint8_t)0x48U;
-    uu____8[1U] = (uint8_t)0x50U;
-    uu____8[2U] = (uint8_t)0x4bU;
-    uu____8[3U] = (uint8_t)0x45U;
-    uu____8[4U] = (uint8_t)0x2dU;
-    uu____8[5U] = (uint8_t)0x76U;
-    uu____8[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____8[0U] = 0x48U;
+    uu____8[1U] = 0x50U;
+    uu____8[2U] = 0x4bU;
+    uu____8[3U] = 0x45U;
+    uu____8[4U] = 0x2dU;
+    uu____8[5U] = 0x76U;
+    uu____8[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t tmp1[len1];
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp1;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t tmp2[len2];
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp2;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t tmp3[len3];
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____11 = tmp3 + (uint32_t)2U;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____11 = tmp3 + 2U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t tmp4[len4];
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp4 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____12 = tmp4 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t tmp[len];
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____13 = tmp + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____13 = tmp + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res3;
   }
   return res3;
@@ -318,279 +290,252 @@ Hacl_HPKE_P256_CP32_SHA256_setupBaseR(
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    uint8_t *pkE = enc + (uint32_t)1U;
+    uint8_t *pkE = enc + 1U;
     uint8_t dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkE, skR);
-    memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res11;
     if (res)
     {
-      res11 = (uint32_t)0U;
+      res11 = 0U;
     }
     else
     {
-      res11 = (uint32_t)1U;
+      res11 = 1U;
     }
     uint32_t res20;
     uint8_t kemcontext[130U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)65U;
-      uint8_t *pkR1 = pkRm + (uint32_t)1U;
+      uint8_t *pkRm = kemcontext + 65U;
+      uint8_t *pkR1 = pkRm + 1U;
       bool res3 = Hacl_Impl_P256_DH_ecp256dh_i(pkR1, skR);
       uint32_t res2;
       if (res3)
       {
-        res2 = (uint32_t)0U;
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
-      if (res2 == (uint32_t)0U)
+      if (res2 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t));
-        pkRm[0U] = (uint8_t)4U;
+        memcpy(kemcontext, enc, 65U * sizeof (uint8_t));
+        pkRm[0U] = 4U;
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____0 = suite_id_kem;
-        uu____0[0U] = (uint8_t)0x4bU;
-        uu____0[1U] = (uint8_t)0x45U;
-        uu____0[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-        uu____1[0U] = (uint8_t)0U;
-        uu____1[1U] = (uint8_t)16U;
+        uu____0[0U] = 0x4bU;
+        uu____0[1U] = 0x45U;
+        uu____0[2U] = 0x4dU;
+        uint8_t *uu____1 = suite_id_kem + 3U;
+        uu____1[0U] = 0U;
+        uu____1[1U] = 16U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t tmp1[len0];
         memset(tmp1, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____2 = tmp1;
-        uu____2[0U] = (uint8_t)0x48U;
-        uu____2[1U] = (uint8_t)0x50U;
-        uu____2[2U] = (uint8_t)0x4bU;
-        uu____2[3U] = (uint8_t)0x45U;
-        uu____2[4U] = (uint8_t)0x2dU;
-        uu____2[5U] = (uint8_t)0x76U;
-        uu____2[6U] = (uint8_t)0x31U;
-        memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+        uu____2[0U] = 0x48U;
+        uu____2[1U] = 0x50U;
+        uu____2[2U] = 0x4bU;
+        uu____2[3U] = 0x45U;
+        uu____2[4U] = 0x2dU;
+        uu____2[5U] = 0x76U;
+        uu____2[6U] = 0x31U;
+        memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp1 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)157U;
+        uint32_t len = 157U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t tmp[len];
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____3 = tmp + (uint32_t)2U;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)130U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res20 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____3 = tmp + 2U;
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 130U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res20 = 0U;
       }
       else
       {
-        res20 = (uint32_t)1U;
+        res20 = 1U;
       }
     }
     else
     {
-      res20 = (uint32_t)1U;
+      res20 = 1U;
     }
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____4 = suite_id;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uint8_t *uu____5 = suite_id + (uint32_t)4U;
-      uu____5[0U] = (uint8_t)0U;
-      uu____5[1U] = (uint8_t)16U;
-      uint8_t *uu____6 = suite_id + (uint32_t)6U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)1U;
-      uint8_t *uu____7 = suite_id + (uint32_t)8U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uint8_t *uu____5 = suite_id + 4U;
+      uu____5[0U] = 0U;
+      uu____5[1U] = 16U;
+      uint8_t *uu____6 = suite_id + 6U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 1U;
+      uint8_t *uu____7 = suite_id + 8U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t tmp1[len0];
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____8 = tmp1;
-      uu____8[0U] = (uint8_t)0x48U;
-      uu____8[1U] = (uint8_t)0x50U;
-      uu____8[2U] = (uint8_t)0x4bU;
-      uu____8[3U] = (uint8_t)0x45U;
-      uu____8[4U] = (uint8_t)0x2dU;
-      uu____8[5U] = (uint8_t)0x76U;
-      uu____8[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp1, len0);
+      uu____8[0U] = 0x48U;
+      uu____8[1U] = 0x50U;
+      uu____8[2U] = 0x4bU;
+      uu____8[3U] = 0x45U;
+      uu____8[4U] = 0x2dU;
+      uu____8[5U] = 0x76U;
+      uu____8[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp1 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp1, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t tmp2[len1];
       memset(tmp2, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp2;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp2, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp2 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp2, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t tmp3[len2];
       memset(tmp3, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp3;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp3, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp3 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp3 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp3, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t tmp4[len3];
       memset(tmp4, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____11 = tmp4 + (uint32_t)2U;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp4,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____11 = tmp4 + 2U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp4, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t tmp5[len4];
       memset(tmp5, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp5, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp5 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp5 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp5, len4, (uint32_t)32U);
+      store16_be(tmp5, (uint16_t)32U);
+      uint8_t *uu____12 = tmp5 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp5 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp5 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp5 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp5, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t tmp[len];
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____13 = tmp + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____13 = tmp + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -609,7 +554,7 @@ Hacl_HPKE_P256_CP32_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -620,43 +565,45 @@ Hacl_HPKE_P256_CP32_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -674,7 +621,7 @@ Hacl_HPKE_P256_CP32_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -685,42 +632,44 @@ Hacl_HPKE_P256_CP32_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
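Aside from the same AEAD argument reordering (this time against the portable Hacl_AEAD_Chacha20Poly1305 functions), the only behaviour-adjacent change in these seal/open hunks is the per-byte XOR now casting both operands to uint32_t, which merely makes the usual integer promotion explicit. For reference, a standalone sketch of the nonce derivation that both sealBase and openBase perform before calling the AEAD, written without the krml helpers (the helper name is hypothetical; in RFC 9180 terms this is ComputeNonce):

  #include <stdint.h>
  #include <stddef.h>

  /* Write the 64-bit sequence counter big-endian into the last 8 bytes of a
     zeroed 12-byte block and XOR it byte-wise with the context's base nonce. */
  static void hpke_compute_nonce(uint8_t nonce[12U], const uint8_t base_nonce[12U], uint64_t seq)
  {
    uint8_t enc[12U] = { 0U };
    for (size_t i = 0U; i < 8U; i++)
      enc[4U + i] = (uint8_t)(seq >> (56U - 8U * i));   /* store64_be(enc + 4U, seq) */
    for (size_t i = 0U; i < 12U; i++)
      nonce[i] = (uint8_t)(enc[i] ^ base_nonce[i]);
  }
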
diff --git a/src/Hacl_Hash_Base.c b/src/Hacl_Hash_Base.c
index 40796f14..02d893e3 100644
--- a/src/Hacl_Hash_Base.c
+++ b/src/Hacl_Hash_Base.c
@@ -31,27 +31,27 @@ uint32_t Hacl_Hash_Definitions_word_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     default:
       {
@@ -67,59 +67,59 @@ uint32_t Hacl_Hash_Definitions_block_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)144U;
+        return 144U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)104U;
+        return 104U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)72U;
+        return 72U;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        return (uint32_t)168U;
+        return 168U;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     default:
       {
@@ -135,27 +135,27 @@ uint32_t Hacl_Hash_Definitions_hash_word_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)5U;
+        return 5U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)7U;
+        return 7U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)6U;
+        return 6U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     default:
       {
@@ -171,51 +171,51 @@ uint32_t Hacl_Hash_Definitions_hash_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)20U;
+        return 20U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     default:
       {
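The Hacl_Hash_Base.c hunks above only drop the redundant (uint32_t) casts on the returned literals; the per-algorithm metadata functions themselves are unchanged. A short usage sketch, assuming these declarations are exposed through a corresponding Hacl_Hash_Base.h header (the exact header name is an assumption):

  #include <stdint.h>
  #include "Hacl_Hash_Base.h"   /* assumed header for the functions patched above */

  /* Query digest and block sizes for SHA2-256: hash_len returns 32U and
     block_len returns 64U, matching the case arms in the patch. */
  void sha2_256_sizes(uint32_t *digest_len, uint32_t *block_len)
  {
    *digest_len = Hacl_Hash_Definitions_hash_len(Spec_Hash_Definitions_SHA2_256);
    *block_len = Hacl_Hash_Definitions_block_len(Spec_Hash_Definitions_SHA2_256);
  }
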
diff --git a/src/Hacl_Hash_Blake2.c b/src/Hacl_Hash_Blake2.c
deleted file mode 100644
index aecc6165..00000000
--- a/src/Hacl_Hash_Blake2.c
+++ /dev/null
@@ -1,1324 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "internal/Hacl_Hash_Blake2.h"
-
-#include "internal/Hacl_Impl_Blake2_Constants.h"
-#include "lib_memzero0.h"
-
-static void
-blake2b_update_block(
-  uint64_t *wv,
-  uint64_t *hash,
-  bool flag,
-  FStar_UInt128_uint128 totlen,
-  uint8_t *d
-)
-{
-  uint64_t m_w[16U] = { 0U };
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)8U;
-    uint64_t u = load64_le(bj);
-    uint64_t r = u;
-    uint64_t x = r;
-    os[i] = x;);
-  uint64_t mask[4U] = { 0U };
-  uint64_t wv_14;
-  if (flag)
-  {
-    wv_14 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  }
-  else
-  {
-    wv_14 = (uint64_t)0U;
-  }
-  uint64_t wv_15 = (uint64_t)0U;
-  mask[0U] = FStar_UInt128_uint128_to_uint64(totlen);
-  mask[1U] = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, (uint32_t)64U));
-  mask[2U] = wv_14;
-  mask[3U] = wv_15;
-  memcpy(wv, hash, (uint32_t)16U * sizeof (uint64_t));
-  uint64_t *wv3 = wv + (uint32_t)12U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = wv3;
-    uint64_t x = wv3[i] ^ mask[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR12(i0,
-    (uint32_t)0U,
-    (uint32_t)12U,
-    (uint32_t)1U,
-    uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U;
-    uint64_t m_st[16U] = { 0U };
-    uint64_t *r0 = m_st;
-    uint64_t *r1 = m_st + (uint32_t)4U;
-    uint64_t *r20 = m_st + (uint32_t)8U;
-    uint64_t *r30 = m_st + (uint32_t)12U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
-    uint64_t uu____0 = m_w[s2];
-    uint64_t uu____1 = m_w[s4];
-    uint64_t uu____2 = m_w[s6];
-    r0[0U] = m_w[s0];
-    r0[1U] = uu____0;
-    r0[2U] = uu____1;
-    r0[3U] = uu____2;
-    uint64_t uu____3 = m_w[s3];
-    uint64_t uu____4 = m_w[s5];
-    uint64_t uu____5 = m_w[s7];
-    r1[0U] = m_w[s1];
-    r1[1U] = uu____3;
-    r1[2U] = uu____4;
-    r1[3U] = uu____5;
-    uint64_t uu____6 = m_w[s10];
-    uint64_t uu____7 = m_w[s12];
-    uint64_t uu____8 = m_w[s14];
-    r20[0U] = m_w[s8];
-    r20[1U] = uu____6;
-    r20[2U] = uu____7;
-    r20[3U] = uu____8;
-    uint64_t uu____9 = m_w[s11];
-    uint64_t uu____10 = m_w[s13];
-    uint64_t uu____11 = m_w[s15];
-    r30[0U] = m_w[s9];
-    r30[1U] = uu____9;
-    r30[2U] = uu____10;
-    r30[3U] = uu____11;
-    uint64_t *x = m_st;
-    uint64_t *y = m_st + (uint32_t)4U;
-    uint64_t *z = m_st + (uint32_t)8U;
-    uint64_t *w = m_st + (uint32_t)12U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    uint64_t *wv_a0 = wv + a * (uint32_t)4U;
-    uint64_t *wv_b0 = wv + b0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a0;
-      uint64_t x1 = wv_a0[i] + wv_b0[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a0;
-      uint64_t x1 = wv_a0[i] + x[i];
-      os[i] = x1;);
-    uint64_t *wv_a1 = wv + d10 * (uint32_t)4U;
-    uint64_t *wv_b1 = wv + a * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a1;
-      uint64_t x1 = wv_a1[i] ^ wv_b1[i];
-      os[i] = x1;);
-    uint64_t *r10 = wv_a1;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r10;
-      uint64_t x1 = r10[i];
-      uint64_t x10 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U;
-      os[i] = x10;);
-    uint64_t *wv_a2 = wv + c0 * (uint32_t)4U;
-    uint64_t *wv_b2 = wv + d10 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a2;
-      uint64_t x1 = wv_a2[i] + wv_b2[i];
-      os[i] = x1;);
-    uint64_t *wv_a3 = wv + b0 * (uint32_t)4U;
-    uint64_t *wv_b3 = wv + c0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a3;
-      uint64_t x1 = wv_a3[i] ^ wv_b3[i];
-      os[i] = x1;);
-    uint64_t *r12 = wv_a3;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r12;
-      uint64_t x1 = r12[i];
-      uint64_t x10 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U;
-      os[i] = x10;);
-    uint64_t *wv_a4 = wv + a * (uint32_t)4U;
-    uint64_t *wv_b4 = wv + b0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a4;
-      uint64_t x1 = wv_a4[i] + wv_b4[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a4;
-      uint64_t x1 = wv_a4[i] + y[i];
-      os[i] = x1;);
-    uint64_t *wv_a5 = wv + d10 * (uint32_t)4U;
-    uint64_t *wv_b5 = wv + a * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a5;
-      uint64_t x1 = wv_a5[i] ^ wv_b5[i];
-      os[i] = x1;);
-    uint64_t *r13 = wv_a5;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r13;
-      uint64_t x1 = r13[i];
-      uint64_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U;
-      os[i] = x10;);
-    uint64_t *wv_a6 = wv + c0 * (uint32_t)4U;
-    uint64_t *wv_b6 = wv + d10 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a6;
-      uint64_t x1 = wv_a6[i] + wv_b6[i];
-      os[i] = x1;);
-    uint64_t *wv_a7 = wv + b0 * (uint32_t)4U;
-    uint64_t *wv_b7 = wv + c0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a7;
-      uint64_t x1 = wv_a7[i] ^ wv_b7[i];
-      os[i] = x1;);
-    uint64_t *r14 = wv_a7;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r14;
-      uint64_t x1 = r14[i];
-      uint64_t x10 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U;
-      os[i] = x10;);
-    uint64_t *r15 = wv + (uint32_t)4U;
-    uint64_t *r21 = wv + (uint32_t)8U;
-    uint64_t *r31 = wv + (uint32_t)12U;
-    uint64_t *r110 = r15;
-    uint64_t x00 = r110[1U];
-    uint64_t x10 = r110[2U];
-    uint64_t x20 = r110[3U];
-    uint64_t x30 = r110[0U];
-    r110[0U] = x00;
-    r110[1U] = x10;
-    r110[2U] = x20;
-    r110[3U] = x30;
-    uint64_t *r111 = r21;
-    uint64_t x01 = r111[2U];
-    uint64_t x11 = r111[3U];
-    uint64_t x21 = r111[0U];
-    uint64_t x31 = r111[1U];
-    r111[0U] = x01;
-    r111[1U] = x11;
-    r111[2U] = x21;
-    r111[3U] = x31;
-    uint64_t *r112 = r31;
-    uint64_t x02 = r112[3U];
-    uint64_t x12 = r112[0U];
-    uint64_t x22 = r112[1U];
-    uint64_t x32 = r112[2U];
-    r112[0U] = x02;
-    r112[1U] = x12;
-    r112[2U] = x22;
-    r112[3U] = x32;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    uint64_t *wv_a = wv + a0 * (uint32_t)4U;
-    uint64_t *wv_b8 = wv + b * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a;
-      uint64_t x1 = wv_a[i] + wv_b8[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a;
-      uint64_t x1 = wv_a[i] + z[i];
-      os[i] = x1;);
-    uint64_t *wv_a8 = wv + d1 * (uint32_t)4U;
-    uint64_t *wv_b9 = wv + a0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a8;
-      uint64_t x1 = wv_a8[i] ^ wv_b9[i];
-      os[i] = x1;);
-    uint64_t *r16 = wv_a8;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r16;
-      uint64_t x1 = r16[i];
-      uint64_t x13 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U;
-      os[i] = x13;);
-    uint64_t *wv_a9 = wv + c * (uint32_t)4U;
-    uint64_t *wv_b10 = wv + d1 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a9;
-      uint64_t x1 = wv_a9[i] + wv_b10[i];
-      os[i] = x1;);
-    uint64_t *wv_a10 = wv + b * (uint32_t)4U;
-    uint64_t *wv_b11 = wv + c * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a10;
-      uint64_t x1 = wv_a10[i] ^ wv_b11[i];
-      os[i] = x1;);
-    uint64_t *r17 = wv_a10;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r17;
-      uint64_t x1 = r17[i];
-      uint64_t x13 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U;
-      os[i] = x13;);
-    uint64_t *wv_a11 = wv + a0 * (uint32_t)4U;
-    uint64_t *wv_b12 = wv + b * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a11;
-      uint64_t x1 = wv_a11[i] + wv_b12[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a11;
-      uint64_t x1 = wv_a11[i] + w[i];
-      os[i] = x1;);
-    uint64_t *wv_a12 = wv + d1 * (uint32_t)4U;
-    uint64_t *wv_b13 = wv + a0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a12;
-      uint64_t x1 = wv_a12[i] ^ wv_b13[i];
-      os[i] = x1;);
-    uint64_t *r18 = wv_a12;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r18;
-      uint64_t x1 = r18[i];
-      uint64_t x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U;
-      os[i] = x13;);
-    uint64_t *wv_a13 = wv + c * (uint32_t)4U;
-    uint64_t *wv_b14 = wv + d1 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a13;
-      uint64_t x1 = wv_a13[i] + wv_b14[i];
-      os[i] = x1;);
-    uint64_t *wv_a14 = wv + b * (uint32_t)4U;
-    uint64_t *wv_b = wv + c * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a14;
-      uint64_t x1 = wv_a14[i] ^ wv_b[i];
-      os[i] = x1;);
-    uint64_t *r19 = wv_a14;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r19;
-      uint64_t x1 = r19[i];
-      uint64_t x13 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U;
-      os[i] = x13;);
-    uint64_t *r113 = wv + (uint32_t)4U;
-    uint64_t *r2 = wv + (uint32_t)8U;
-    uint64_t *r3 = wv + (uint32_t)12U;
-    uint64_t *r11 = r113;
-    uint64_t x03 = r11[3U];
-    uint64_t x13 = r11[0U];
-    uint64_t x23 = r11[1U];
-    uint64_t x33 = r11[2U];
-    r11[0U] = x03;
-    r11[1U] = x13;
-    r11[2U] = x23;
-    r11[3U] = x33;
-    uint64_t *r114 = r2;
-    uint64_t x04 = r114[2U];
-    uint64_t x14 = r114[3U];
-    uint64_t x24 = r114[0U];
-    uint64_t x34 = r114[1U];
-    r114[0U] = x04;
-    r114[1U] = x14;
-    r114[2U] = x24;
-    r114[3U] = x34;
-    uint64_t *r115 = r3;
-    uint64_t x0 = r115[1U];
-    uint64_t x1 = r115[2U];
-    uint64_t x2 = r115[3U];
-    uint64_t x3 = r115[0U];
-    r115[0U] = x0;
-    r115[1U] = x1;
-    r115[2U] = x2;
-    r115[3U] = x3;);
-  uint64_t *s0 = hash;
-  uint64_t *s1 = hash + (uint32_t)4U;
-  uint64_t *r0 = wv;
-  uint64_t *r1 = wv + (uint32_t)4U;
-  uint64_t *r2 = wv + (uint32_t)8U;
-  uint64_t *r3 = wv + (uint32_t)12U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = s0;
-    uint64_t x = s0[i] ^ r0[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = s0;
-    uint64_t x = s0[i] ^ r2[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = s1;
-    uint64_t x = s1[i] ^ r1[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = s1;
-    uint64_t x = s1[i] ^ r3[i];
-    os[i] = x;);
-}
-
-void Hacl_Blake2b_32_blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn)
-{
-  uint64_t *r0 = hash;
-  uint64_t *r1 = hash + (uint32_t)4U;
-  uint64_t *r2 = hash + (uint32_t)8U;
-  uint64_t *r3 = hash + (uint32_t)12U;
-  uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U];
-  uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U];
-  uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U];
-  uint64_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_B[3U];
-  uint64_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_B[4U];
-  uint64_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_B[5U];
-  uint64_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_B[6U];
-  uint64_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_B[7U];
-  r2[0U] = iv0;
-  r2[1U] = iv1;
-  r2[2U] = iv2;
-  r2[3U] = iv3;
-  r3[0U] = iv4;
-  r3[1U] = iv5;
-  r3[2U] = iv6;
-  r3[3U] = iv7;
-  uint64_t kk_shift_8 = (uint64_t)kk << (uint32_t)8U;
-  uint64_t iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)nn));
-  r0[0U] = iv0_;
-  r0[1U] = iv1;
-  r0[2U] = iv2;
-  r0[3U] = iv3;
-  r1[0U] = iv4;
-  r1[1U] = iv5;
-  r1[2U] = iv6;
-  r1[3U] = iv7;
-}
-
-void
-Hacl_Blake2b_32_blake2b_update_key(
-  uint64_t *wv,
-  uint64_t *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-)
-{
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
-  uint8_t b[128U] = { 0U };
-  memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
-  {
-    blake2b_update_block(wv, hash, true, lb, b);
-  }
-  else
-  {
-    blake2b_update_block(wv, hash, false, lb, b);
-  }
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
-}
-
-void
-Hacl_Blake2b_32_blake2b_update_multi(
-  uint32_t len,
-  uint64_t *wv,
-  uint64_t *hash,
-  FStar_UInt128_uint128 prev,
-  uint8_t *blocks,
-  uint32_t nb
-)
-{
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
-  {
-    FStar_UInt128_uint128
-    totlen =
-      FStar_UInt128_add_mod(prev,
-        FStar_UInt128_uint64_to_uint128((uint64_t)((i + (uint32_t)1U) * (uint32_t)128U)));
-    uint8_t *b = blocks + i * (uint32_t)128U;
-    blake2b_update_block(wv, hash, false, totlen, b);
-  }
-}
-
-void
-Hacl_Blake2b_32_blake2b_update_last(
-  uint32_t len,
-  uint64_t *wv,
-  uint64_t *hash,
-  FStar_UInt128_uint128 prev,
-  uint32_t rem,
-  uint8_t *d
-)
-{
-  uint8_t b[128U] = { 0U };
-  uint8_t *last = d + len - rem;
-  memcpy(b, last, rem * sizeof (uint8_t));
-  FStar_UInt128_uint128
-  totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len));
-  blake2b_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
-}
-
-static void
-blake2b_update_blocks(
-  uint32_t len,
-  uint64_t *wv,
-  uint64_t *hash,
-  FStar_UInt128_uint128 prev,
-  uint8_t *blocks
-)
-{
-  uint32_t nb0 = len / (uint32_t)128U;
-  uint32_t rem0 = len % (uint32_t)128U;
-  K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
-  {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)128U;
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
-  }
-  else
-  {
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb0, .snd = rem0 });
-  }
-  uint32_t nb = scrut.fst;
-  uint32_t rem = scrut.snd;
-  Hacl_Blake2b_32_blake2b_update_multi(len, wv, hash, prev, blocks, nb);
-  Hacl_Blake2b_32_blake2b_update_last(len, wv, hash, prev, rem, blocks);
-}
-
-static inline void
-blake2b_update(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d)
-{
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
-  if (kk > (uint32_t)0U)
-  {
-    Hacl_Blake2b_32_blake2b_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
-    {
-      blake2b_update_blocks(ll, wv, hash, lb, d);
-      return;
-    }
-    return;
-  }
-  blake2b_update_blocks(ll,
-    wv,
-    hash,
-    FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)0U),
-    d);
-}
-
-void Hacl_Blake2b_32_blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash)
-{
-  uint8_t b[64U] = { 0U };
-  uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)32U;
-  uint64_t *row0 = hash;
-  uint64_t *row1 = hash + (uint32_t)4U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(first + i * (uint32_t)8U, row0[i]););
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(second + i * (uint32_t)8U, row1[i]););
-  uint8_t *final = b;
-  memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
-}
-
-/**
-Write the BLAKE2b digest of message `d` using key `k` into `output`.
-
-@param nn Length of the to-be-generated digest with 1 <= `nn` <= 64.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2b_32_blake2b(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-)
-{
-  uint64_t b[16U] = { 0U };
-  uint64_t b1[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_init(b, kk, nn);
-  blake2b_update(b1, b, kk, k, ll, d);
-  Hacl_Blake2b_32_blake2b_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)16U, uint64_t);
-  Lib_Memzero0_memzero(b, (uint32_t)16U, uint64_t);
-}
-
-uint64_t *Hacl_Blake2b_32_blake2b_malloc(void)
-{
-  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
-  return buf;
-}
-
-static inline void
-blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t *d)
-{
-  uint32_t m_w[16U] = { 0U };
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint32_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)4U;
-    uint32_t u = load32_le(bj);
-    uint32_t r = u;
-    uint32_t x = r;
-    os[i] = x;);
-  uint32_t mask[4U] = { 0U };
-  uint32_t wv_14;
-  if (flag)
-  {
-    wv_14 = (uint32_t)0xFFFFFFFFU;
-  }
-  else
-  {
-    wv_14 = (uint32_t)0U;
-  }
-  uint32_t wv_15 = (uint32_t)0U;
-  mask[0U] = (uint32_t)totlen;
-  mask[1U] = (uint32_t)(totlen >> (uint32_t)32U);
-  mask[2U] = wv_14;
-  mask[3U] = wv_15;
-  memcpy(wv, hash, (uint32_t)16U * sizeof (uint32_t));
-  uint32_t *wv3 = wv + (uint32_t)12U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = wv3;
-    uint32_t x = wv3[i] ^ mask[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR10(i0,
-    (uint32_t)0U,
-    (uint32_t)10U,
-    (uint32_t)1U,
-    uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U;
-    uint32_t m_st[16U] = { 0U };
-    uint32_t *r0 = m_st;
-    uint32_t *r1 = m_st + (uint32_t)4U;
-    uint32_t *r20 = m_st + (uint32_t)8U;
-    uint32_t *r30 = m_st + (uint32_t)12U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
-    uint32_t uu____0 = m_w[s2];
-    uint32_t uu____1 = m_w[s4];
-    uint32_t uu____2 = m_w[s6];
-    r0[0U] = m_w[s0];
-    r0[1U] = uu____0;
-    r0[2U] = uu____1;
-    r0[3U] = uu____2;
-    uint32_t uu____3 = m_w[s3];
-    uint32_t uu____4 = m_w[s5];
-    uint32_t uu____5 = m_w[s7];
-    r1[0U] = m_w[s1];
-    r1[1U] = uu____3;
-    r1[2U] = uu____4;
-    r1[3U] = uu____5;
-    uint32_t uu____6 = m_w[s10];
-    uint32_t uu____7 = m_w[s12];
-    uint32_t uu____8 = m_w[s14];
-    r20[0U] = m_w[s8];
-    r20[1U] = uu____6;
-    r20[2U] = uu____7;
-    r20[3U] = uu____8;
-    uint32_t uu____9 = m_w[s11];
-    uint32_t uu____10 = m_w[s13];
-    uint32_t uu____11 = m_w[s15];
-    r30[0U] = m_w[s9];
-    r30[1U] = uu____9;
-    r30[2U] = uu____10;
-    r30[3U] = uu____11;
-    uint32_t *x = m_st;
-    uint32_t *y = m_st + (uint32_t)4U;
-    uint32_t *z = m_st + (uint32_t)8U;
-    uint32_t *w = m_st + (uint32_t)12U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    uint32_t *wv_a0 = wv + a * (uint32_t)4U;
-    uint32_t *wv_b0 = wv + b0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a0;
-      uint32_t x1 = wv_a0[i] + wv_b0[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a0;
-      uint32_t x1 = wv_a0[i] + x[i];
-      os[i] = x1;);
-    uint32_t *wv_a1 = wv + d10 * (uint32_t)4U;
-    uint32_t *wv_b1 = wv + a * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a1;
-      uint32_t x1 = wv_a1[i] ^ wv_b1[i];
-      os[i] = x1;);
-    uint32_t *r10 = wv_a1;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r10;
-      uint32_t x1 = r10[i];
-      uint32_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U;
-      os[i] = x10;);
-    uint32_t *wv_a2 = wv + c0 * (uint32_t)4U;
-    uint32_t *wv_b2 = wv + d10 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a2;
-      uint32_t x1 = wv_a2[i] + wv_b2[i];
-      os[i] = x1;);
-    uint32_t *wv_a3 = wv + b0 * (uint32_t)4U;
-    uint32_t *wv_b3 = wv + c0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a3;
-      uint32_t x1 = wv_a3[i] ^ wv_b3[i];
-      os[i] = x1;);
-    uint32_t *r12 = wv_a3;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r12;
-      uint32_t x1 = r12[i];
-      uint32_t x10 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U;
-      os[i] = x10;);
-    uint32_t *wv_a4 = wv + a * (uint32_t)4U;
-    uint32_t *wv_b4 = wv + b0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a4;
-      uint32_t x1 = wv_a4[i] + wv_b4[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a4;
-      uint32_t x1 = wv_a4[i] + y[i];
-      os[i] = x1;);
-    uint32_t *wv_a5 = wv + d10 * (uint32_t)4U;
-    uint32_t *wv_b5 = wv + a * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a5;
-      uint32_t x1 = wv_a5[i] ^ wv_b5[i];
-      os[i] = x1;);
-    uint32_t *r13 = wv_a5;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r13;
-      uint32_t x1 = r13[i];
-      uint32_t x10 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U;
-      os[i] = x10;);
-    uint32_t *wv_a6 = wv + c0 * (uint32_t)4U;
-    uint32_t *wv_b6 = wv + d10 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a6;
-      uint32_t x1 = wv_a6[i] + wv_b6[i];
-      os[i] = x1;);
-    uint32_t *wv_a7 = wv + b0 * (uint32_t)4U;
-    uint32_t *wv_b7 = wv + c0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a7;
-      uint32_t x1 = wv_a7[i] ^ wv_b7[i];
-      os[i] = x1;);
-    uint32_t *r14 = wv_a7;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r14;
-      uint32_t x1 = r14[i];
-      uint32_t x10 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U;
-      os[i] = x10;);
-    uint32_t *r15 = wv + (uint32_t)4U;
-    uint32_t *r21 = wv + (uint32_t)8U;
-    uint32_t *r31 = wv + (uint32_t)12U;
-    uint32_t *r110 = r15;
-    uint32_t x00 = r110[1U];
-    uint32_t x10 = r110[2U];
-    uint32_t x20 = r110[3U];
-    uint32_t x30 = r110[0U];
-    r110[0U] = x00;
-    r110[1U] = x10;
-    r110[2U] = x20;
-    r110[3U] = x30;
-    uint32_t *r111 = r21;
-    uint32_t x01 = r111[2U];
-    uint32_t x11 = r111[3U];
-    uint32_t x21 = r111[0U];
-    uint32_t x31 = r111[1U];
-    r111[0U] = x01;
-    r111[1U] = x11;
-    r111[2U] = x21;
-    r111[3U] = x31;
-    uint32_t *r112 = r31;
-    uint32_t x02 = r112[3U];
-    uint32_t x12 = r112[0U];
-    uint32_t x22 = r112[1U];
-    uint32_t x32 = r112[2U];
-    r112[0U] = x02;
-    r112[1U] = x12;
-    r112[2U] = x22;
-    r112[3U] = x32;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    uint32_t *wv_a = wv + a0 * (uint32_t)4U;
-    uint32_t *wv_b8 = wv + b * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a;
-      uint32_t x1 = wv_a[i] + wv_b8[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a;
-      uint32_t x1 = wv_a[i] + z[i];
-      os[i] = x1;);
-    uint32_t *wv_a8 = wv + d1 * (uint32_t)4U;
-    uint32_t *wv_b9 = wv + a0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a8;
-      uint32_t x1 = wv_a8[i] ^ wv_b9[i];
-      os[i] = x1;);
-    uint32_t *r16 = wv_a8;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r16;
-      uint32_t x1 = r16[i];
-      uint32_t x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U;
-      os[i] = x13;);
-    uint32_t *wv_a9 = wv + c * (uint32_t)4U;
-    uint32_t *wv_b10 = wv + d1 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a9;
-      uint32_t x1 = wv_a9[i] + wv_b10[i];
-      os[i] = x1;);
-    uint32_t *wv_a10 = wv + b * (uint32_t)4U;
-    uint32_t *wv_b11 = wv + c * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a10;
-      uint32_t x1 = wv_a10[i] ^ wv_b11[i];
-      os[i] = x1;);
-    uint32_t *r17 = wv_a10;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r17;
-      uint32_t x1 = r17[i];
-      uint32_t x13 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U;
-      os[i] = x13;);
-    uint32_t *wv_a11 = wv + a0 * (uint32_t)4U;
-    uint32_t *wv_b12 = wv + b * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a11;
-      uint32_t x1 = wv_a11[i] + wv_b12[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a11;
-      uint32_t x1 = wv_a11[i] + w[i];
-      os[i] = x1;);
-    uint32_t *wv_a12 = wv + d1 * (uint32_t)4U;
-    uint32_t *wv_b13 = wv + a0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a12;
-      uint32_t x1 = wv_a12[i] ^ wv_b13[i];
-      os[i] = x1;);
-    uint32_t *r18 = wv_a12;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r18;
-      uint32_t x1 = r18[i];
-      uint32_t x13 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U;
-      os[i] = x13;);
-    uint32_t *wv_a13 = wv + c * (uint32_t)4U;
-    uint32_t *wv_b14 = wv + d1 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a13;
-      uint32_t x1 = wv_a13[i] + wv_b14[i];
-      os[i] = x1;);
-    uint32_t *wv_a14 = wv + b * (uint32_t)4U;
-    uint32_t *wv_b = wv + c * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a14;
-      uint32_t x1 = wv_a14[i] ^ wv_b[i];
-      os[i] = x1;);
-    uint32_t *r19 = wv_a14;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r19;
-      uint32_t x1 = r19[i];
-      uint32_t x13 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U;
-      os[i] = x13;);
-    uint32_t *r113 = wv + (uint32_t)4U;
-    uint32_t *r2 = wv + (uint32_t)8U;
-    uint32_t *r3 = wv + (uint32_t)12U;
-    uint32_t *r11 = r113;
-    uint32_t x03 = r11[3U];
-    uint32_t x13 = r11[0U];
-    uint32_t x23 = r11[1U];
-    uint32_t x33 = r11[2U];
-    r11[0U] = x03;
-    r11[1U] = x13;
-    r11[2U] = x23;
-    r11[3U] = x33;
-    uint32_t *r114 = r2;
-    uint32_t x04 = r114[2U];
-    uint32_t x14 = r114[3U];
-    uint32_t x24 = r114[0U];
-    uint32_t x34 = r114[1U];
-    r114[0U] = x04;
-    r114[1U] = x14;
-    r114[2U] = x24;
-    r114[3U] = x34;
-    uint32_t *r115 = r3;
-    uint32_t x0 = r115[1U];
-    uint32_t x1 = r115[2U];
-    uint32_t x2 = r115[3U];
-    uint32_t x3 = r115[0U];
-    r115[0U] = x0;
-    r115[1U] = x1;
-    r115[2U] = x2;
-    r115[3U] = x3;);
-  uint32_t *s0 = hash;
-  uint32_t *s1 = hash + (uint32_t)4U;
-  uint32_t *r0 = wv;
-  uint32_t *r1 = wv + (uint32_t)4U;
-  uint32_t *r2 = wv + (uint32_t)8U;
-  uint32_t *r3 = wv + (uint32_t)12U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = s0;
-    uint32_t x = s0[i] ^ r0[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = s0;
-    uint32_t x = s0[i] ^ r2[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = s1;
-    uint32_t x = s1[i] ^ r1[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = s1;
-    uint32_t x = s1[i] ^ r3[i];
-    os[i] = x;);
-}
-
-void Hacl_Blake2s_32_blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn)
-{
-  uint32_t *r0 = hash;
-  uint32_t *r1 = hash + (uint32_t)4U;
-  uint32_t *r2 = hash + (uint32_t)8U;
-  uint32_t *r3 = hash + (uint32_t)12U;
-  uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U];
-  uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U];
-  uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U];
-  uint32_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_S[3U];
-  uint32_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_S[4U];
-  uint32_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_S[5U];
-  uint32_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_S[6U];
-  uint32_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_S[7U];
-  r2[0U] = iv0;
-  r2[1U] = iv1;
-  r2[2U] = iv2;
-  r2[3U] = iv3;
-  r3[0U] = iv4;
-  r3[1U] = iv5;
-  r3[2U] = iv6;
-  r3[3U] = iv7;
-  uint32_t kk_shift_8 = kk << (uint32_t)8U;
-  uint32_t iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ nn));
-  r0[0U] = iv0_;
-  r0[1U] = iv1;
-  r0[2U] = iv2;
-  r0[3U] = iv3;
-  r1[0U] = iv4;
-  r1[1U] = iv5;
-  r1[2U] = iv6;
-  r1[3U] = iv7;
-}
-
-void
-Hacl_Blake2s_32_blake2s_update_key(
-  uint32_t *wv,
-  uint32_t *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-)
-{
-  uint64_t lb = (uint64_t)(uint32_t)64U;
-  uint8_t b[64U] = { 0U };
-  memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
-  {
-    blake2s_update_block(wv, hash, true, lb, b);
-  }
-  else
-  {
-    blake2s_update_block(wv, hash, false, lb, b);
-  }
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
-}
-
-void
-Hacl_Blake2s_32_blake2s_update_multi(
-  uint32_t len,
-  uint32_t *wv,
-  uint32_t *hash,
-  uint64_t prev,
-  uint8_t *blocks,
-  uint32_t nb
-)
-{
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
-  {
-    uint64_t totlen = prev + (uint64_t)((i + (uint32_t)1U) * (uint32_t)64U);
-    uint8_t *b = blocks + i * (uint32_t)64U;
-    blake2s_update_block(wv, hash, false, totlen, b);
-  }
-}
-
-void
-Hacl_Blake2s_32_blake2s_update_last(
-  uint32_t len,
-  uint32_t *wv,
-  uint32_t *hash,
-  uint64_t prev,
-  uint32_t rem,
-  uint8_t *d
-)
-{
-  uint8_t b[64U] = { 0U };
-  uint8_t *last = d + len - rem;
-  memcpy(b, last, rem * sizeof (uint8_t));
-  uint64_t totlen = prev + (uint64_t)len;
-  blake2s_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
-}
-
-static void
-blake2s_update_blocks(
-  uint32_t len,
-  uint32_t *wv,
-  uint32_t *hash,
-  uint64_t prev,
-  uint8_t *blocks
-)
-{
-  uint32_t nb0 = len / (uint32_t)64U;
-  uint32_t rem0 = len % (uint32_t)64U;
-  K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
-  {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)64U;
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
-  }
-  else
-  {
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb0, .snd = rem0 });
-  }
-  uint32_t nb = scrut.fst;
-  uint32_t rem = scrut.snd;
-  Hacl_Blake2s_32_blake2s_update_multi(len, wv, hash, prev, blocks, nb);
-  Hacl_Blake2s_32_blake2s_update_last(len, wv, hash, prev, rem, blocks);
-}
-
-static inline void
-blake2s_update(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d)
-{
-  uint64_t lb = (uint64_t)(uint32_t)64U;
-  if (kk > (uint32_t)0U)
-  {
-    Hacl_Blake2s_32_blake2s_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
-    {
-      blake2s_update_blocks(ll, wv, hash, lb, d);
-      return;
-    }
-    return;
-  }
-  blake2s_update_blocks(ll, wv, hash, (uint64_t)(uint32_t)0U, d);
-}
-
-void Hacl_Blake2s_32_blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash)
-{
-  uint8_t b[32U] = { 0U };
-  uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)16U;
-  uint32_t *row0 = hash;
-  uint32_t *row1 = hash + (uint32_t)4U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store32_le(first + i * (uint32_t)4U, row0[i]););
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store32_le(second + i * (uint32_t)4U, row1[i]););
-  uint8_t *final = b;
-  memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)32U, uint8_t);
-}
-
-/**
-Write the BLAKE2s digest of message `d` using key `k` into `output`.
-
-@param nn Length of to-be-generated digest with 1 <= `nn` <= 32.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2s_32_blake2s(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-)
-{
-  uint32_t b[16U] = { 0U };
-  uint32_t b1[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_init(b, kk, nn);
-  blake2s_update(b1, b, kk, k, ll, d);
-  Hacl_Blake2s_32_blake2s_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)16U, uint32_t);
-  Lib_Memzero0_memzero(b, (uint32_t)16U, uint32_t);
-}
-
-uint32_t *Hacl_Blake2s_32_blake2s_malloc(void)
-{
-  uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
-  return buf;
-}
-
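The deleted definitions above end the legacy Hacl_Blake2b_32_* / Hacl_Blake2s_32_* one-shot API. As a reference for downstream callers that must migrate, here is a minimal sketch (not part of the patch) of how the removed BLAKE2b entry point was invoked; the prototype is restated locally, mirroring the deleted definition, and the wrapper name example_blake2b_unkeyed is illustrative only.

    #include <stdint.h>
    #include <stddef.h>

    /* Prototype restated from the definition deleted above. */
    void Hacl_Blake2b_32_blake2b(uint32_t nn, uint8_t *output, uint32_t ll,
                                 uint8_t *d, uint32_t kk, uint8_t *k);

    /* Illustrative wrapper; the name is not part of the library. */
    void example_blake2b_unkeyed(const uint8_t *msg, uint32_t msg_len,
                                 uint8_t digest[64])
    {
      /* nn = 64 requests the full digest; kk = 0 means unkeyed hashing, and
         the key pointer is not read in that case (see blake2b_update above). */
      Hacl_Blake2b_32_blake2b(64U, digest, msg_len, (uint8_t *)msg, 0U, NULL);
    }
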
diff --git a/src/Hacl_Hash_Blake2b.c b/src/Hacl_Hash_Blake2b.c
new file mode 100644
index 00000000..2dceaf4b
--- /dev/null
+++ b/src/Hacl_Hash_Blake2b.c
@@ -0,0 +1,971 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_Hash_Blake2b.h"
+
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "lib_memzero0.h"
+
+static void
+update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totlen, uint8_t *d)
+{
+  uint64_t m_w[16U] = { 0U };
+  KRML_MAYBE_FOR16(i,
+    0U,
+    16U,
+    1U,
+    uint64_t *os = m_w;
+    uint8_t *bj = d + i * 8U;
+    uint64_t u = load64_le(bj);
+    uint64_t r = u;
+    uint64_t x = r;
+    os[i] = x;);
+  uint64_t mask[4U] = { 0U };
+  uint64_t wv_14;
+  if (flag)
+  {
+    wv_14 = 0xFFFFFFFFFFFFFFFFULL;
+  }
+  else
+  {
+    wv_14 = 0ULL;
+  }
+  uint64_t wv_15 = 0ULL;
+  mask[0U] = FStar_UInt128_uint128_to_uint64(totlen);
+  mask[1U] = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, 64U));
+  mask[2U] = wv_14;
+  mask[3U] = wv_15;
+  memcpy(wv, hash, 16U * sizeof (uint64_t));
+  uint64_t *wv3 = wv + 12U;
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = wv3;
+    uint64_t x = wv3[i] ^ mask[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR12(i0,
+    0U,
+    12U,
+    1U,
+    uint32_t start_idx = i0 % 10U * 16U;
+    uint64_t m_st[16U] = { 0U };
+    uint64_t *r0 = m_st;
+    uint64_t *r1 = m_st + 4U;
+    uint64_t *r20 = m_st + 8U;
+    uint64_t *r30 = m_st + 12U;
+    uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U];
+    uint64_t uu____0 = m_w[s2];
+    uint64_t uu____1 = m_w[s4];
+    uint64_t uu____2 = m_w[s6];
+    r0[0U] = m_w[s0];
+    r0[1U] = uu____0;
+    r0[2U] = uu____1;
+    r0[3U] = uu____2;
+    uint64_t uu____3 = m_w[s3];
+    uint64_t uu____4 = m_w[s5];
+    uint64_t uu____5 = m_w[s7];
+    r1[0U] = m_w[s1];
+    r1[1U] = uu____3;
+    r1[2U] = uu____4;
+    r1[3U] = uu____5;
+    uint64_t uu____6 = m_w[s10];
+    uint64_t uu____7 = m_w[s12];
+    uint64_t uu____8 = m_w[s14];
+    r20[0U] = m_w[s8];
+    r20[1U] = uu____6;
+    r20[2U] = uu____7;
+    r20[3U] = uu____8;
+    uint64_t uu____9 = m_w[s11];
+    uint64_t uu____10 = m_w[s13];
+    uint64_t uu____11 = m_w[s15];
+    r30[0U] = m_w[s9];
+    r30[1U] = uu____9;
+    r30[2U] = uu____10;
+    r30[3U] = uu____11;
+    uint64_t *x = m_st;
+    uint64_t *y = m_st + 4U;
+    uint64_t *z = m_st + 8U;
+    uint64_t *w = m_st + 12U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    uint64_t *wv_a0 = wv + a * 4U;
+    uint64_t *wv_b0 = wv + b0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a0;
+      uint64_t x1 = wv_a0[i] + wv_b0[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a0;
+      uint64_t x1 = wv_a0[i] + x[i];
+      os[i] = x1;);
+    uint64_t *wv_a1 = wv + d10 * 4U;
+    uint64_t *wv_b1 = wv + a * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a1;
+      uint64_t x1 = wv_a1[i] ^ wv_b1[i];
+      os[i] = x1;);
+    uint64_t *r10 = wv_a1;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r10;
+      uint64_t x1 = r10[i];
+      uint64_t x10 = x1 >> 32U | x1 << 32U;
+      os[i] = x10;);
+    uint64_t *wv_a2 = wv + c0 * 4U;
+    uint64_t *wv_b2 = wv + d10 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a2;
+      uint64_t x1 = wv_a2[i] + wv_b2[i];
+      os[i] = x1;);
+    uint64_t *wv_a3 = wv + b0 * 4U;
+    uint64_t *wv_b3 = wv + c0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a3;
+      uint64_t x1 = wv_a3[i] ^ wv_b3[i];
+      os[i] = x1;);
+    uint64_t *r12 = wv_a3;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r12;
+      uint64_t x1 = r12[i];
+      uint64_t x10 = x1 >> 24U | x1 << 40U;
+      os[i] = x10;);
+    uint64_t *wv_a4 = wv + a * 4U;
+    uint64_t *wv_b4 = wv + b0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a4;
+      uint64_t x1 = wv_a4[i] + wv_b4[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a4;
+      uint64_t x1 = wv_a4[i] + y[i];
+      os[i] = x1;);
+    uint64_t *wv_a5 = wv + d10 * 4U;
+    uint64_t *wv_b5 = wv + a * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a5;
+      uint64_t x1 = wv_a5[i] ^ wv_b5[i];
+      os[i] = x1;);
+    uint64_t *r13 = wv_a5;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r13;
+      uint64_t x1 = r13[i];
+      uint64_t x10 = x1 >> 16U | x1 << 48U;
+      os[i] = x10;);
+    uint64_t *wv_a6 = wv + c0 * 4U;
+    uint64_t *wv_b6 = wv + d10 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a6;
+      uint64_t x1 = wv_a6[i] + wv_b6[i];
+      os[i] = x1;);
+    uint64_t *wv_a7 = wv + b0 * 4U;
+    uint64_t *wv_b7 = wv + c0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a7;
+      uint64_t x1 = wv_a7[i] ^ wv_b7[i];
+      os[i] = x1;);
+    uint64_t *r14 = wv_a7;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r14;
+      uint64_t x1 = r14[i];
+      uint64_t x10 = x1 >> 63U | x1 << 1U;
+      os[i] = x10;);
+    uint64_t *r15 = wv + 4U;
+    uint64_t *r21 = wv + 8U;
+    uint64_t *r31 = wv + 12U;
+    uint64_t *r110 = r15;
+    uint64_t x00 = r110[1U];
+    uint64_t x10 = r110[2U];
+    uint64_t x20 = r110[3U];
+    uint64_t x30 = r110[0U];
+    r110[0U] = x00;
+    r110[1U] = x10;
+    r110[2U] = x20;
+    r110[3U] = x30;
+    uint64_t *r111 = r21;
+    uint64_t x01 = r111[2U];
+    uint64_t x11 = r111[3U];
+    uint64_t x21 = r111[0U];
+    uint64_t x31 = r111[1U];
+    r111[0U] = x01;
+    r111[1U] = x11;
+    r111[2U] = x21;
+    r111[3U] = x31;
+    uint64_t *r112 = r31;
+    uint64_t x02 = r112[3U];
+    uint64_t x12 = r112[0U];
+    uint64_t x22 = r112[1U];
+    uint64_t x32 = r112[2U];
+    r112[0U] = x02;
+    r112[1U] = x12;
+    r112[2U] = x22;
+    r112[3U] = x32;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    uint64_t *wv_a = wv + a0 * 4U;
+    uint64_t *wv_b8 = wv + b * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a;
+      uint64_t x1 = wv_a[i] + wv_b8[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a;
+      uint64_t x1 = wv_a[i] + z[i];
+      os[i] = x1;);
+    uint64_t *wv_a8 = wv + d1 * 4U;
+    uint64_t *wv_b9 = wv + a0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a8;
+      uint64_t x1 = wv_a8[i] ^ wv_b9[i];
+      os[i] = x1;);
+    uint64_t *r16 = wv_a8;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r16;
+      uint64_t x1 = r16[i];
+      uint64_t x13 = x1 >> 32U | x1 << 32U;
+      os[i] = x13;);
+    uint64_t *wv_a9 = wv + c * 4U;
+    uint64_t *wv_b10 = wv + d1 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a9;
+      uint64_t x1 = wv_a9[i] + wv_b10[i];
+      os[i] = x1;);
+    uint64_t *wv_a10 = wv + b * 4U;
+    uint64_t *wv_b11 = wv + c * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a10;
+      uint64_t x1 = wv_a10[i] ^ wv_b11[i];
+      os[i] = x1;);
+    uint64_t *r17 = wv_a10;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r17;
+      uint64_t x1 = r17[i];
+      uint64_t x13 = x1 >> 24U | x1 << 40U;
+      os[i] = x13;);
+    uint64_t *wv_a11 = wv + a0 * 4U;
+    uint64_t *wv_b12 = wv + b * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a11;
+      uint64_t x1 = wv_a11[i] + wv_b12[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a11;
+      uint64_t x1 = wv_a11[i] + w[i];
+      os[i] = x1;);
+    uint64_t *wv_a12 = wv + d1 * 4U;
+    uint64_t *wv_b13 = wv + a0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a12;
+      uint64_t x1 = wv_a12[i] ^ wv_b13[i];
+      os[i] = x1;);
+    uint64_t *r18 = wv_a12;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r18;
+      uint64_t x1 = r18[i];
+      uint64_t x13 = x1 >> 16U | x1 << 48U;
+      os[i] = x13;);
+    uint64_t *wv_a13 = wv + c * 4U;
+    uint64_t *wv_b14 = wv + d1 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a13;
+      uint64_t x1 = wv_a13[i] + wv_b14[i];
+      os[i] = x1;);
+    uint64_t *wv_a14 = wv + b * 4U;
+    uint64_t *wv_b = wv + c * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a14;
+      uint64_t x1 = wv_a14[i] ^ wv_b[i];
+      os[i] = x1;);
+    uint64_t *r19 = wv_a14;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r19;
+      uint64_t x1 = r19[i];
+      uint64_t x13 = x1 >> 63U | x1 << 1U;
+      os[i] = x13;);
+    uint64_t *r113 = wv + 4U;
+    uint64_t *r2 = wv + 8U;
+    uint64_t *r3 = wv + 12U;
+    uint64_t *r11 = r113;
+    uint64_t x03 = r11[3U];
+    uint64_t x13 = r11[0U];
+    uint64_t x23 = r11[1U];
+    uint64_t x33 = r11[2U];
+    r11[0U] = x03;
+    r11[1U] = x13;
+    r11[2U] = x23;
+    r11[3U] = x33;
+    uint64_t *r114 = r2;
+    uint64_t x04 = r114[2U];
+    uint64_t x14 = r114[3U];
+    uint64_t x24 = r114[0U];
+    uint64_t x34 = r114[1U];
+    r114[0U] = x04;
+    r114[1U] = x14;
+    r114[2U] = x24;
+    r114[3U] = x34;
+    uint64_t *r115 = r3;
+    uint64_t x0 = r115[1U];
+    uint64_t x1 = r115[2U];
+    uint64_t x2 = r115[3U];
+    uint64_t x3 = r115[0U];
+    r115[0U] = x0;
+    r115[1U] = x1;
+    r115[2U] = x2;
+    r115[3U] = x3;);
+  uint64_t *s0 = hash;
+  uint64_t *s1 = hash + 4U;
+  uint64_t *r0 = wv;
+  uint64_t *r1 = wv + 4U;
+  uint64_t *r2 = wv + 8U;
+  uint64_t *r3 = wv + 12U;
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = s0;
+    uint64_t x = s0[i] ^ r0[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = s0;
+    uint64_t x = s0[i] ^ r2[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = s1;
+    uint64_t x = s1[i] ^ r1[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = s1;
+    uint64_t x = s1[i] ^ r3[i];
+    os[i] = x;);
+}
+
+void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn)
+{
+  uint64_t *r0 = hash;
+  uint64_t *r1 = hash + 4U;
+  uint64_t *r2 = hash + 8U;
+  uint64_t *r3 = hash + 12U;
+  uint64_t iv0 = Hacl_Hash_Blake2s_ivTable_B[0U];
+  uint64_t iv1 = Hacl_Hash_Blake2s_ivTable_B[1U];
+  uint64_t iv2 = Hacl_Hash_Blake2s_ivTable_B[2U];
+  uint64_t iv3 = Hacl_Hash_Blake2s_ivTable_B[3U];
+  uint64_t iv4 = Hacl_Hash_Blake2s_ivTable_B[4U];
+  uint64_t iv5 = Hacl_Hash_Blake2s_ivTable_B[5U];
+  uint64_t iv6 = Hacl_Hash_Blake2s_ivTable_B[6U];
+  uint64_t iv7 = Hacl_Hash_Blake2s_ivTable_B[7U];
+  r2[0U] = iv0;
+  r2[1U] = iv1;
+  r2[2U] = iv2;
+  r2[3U] = iv3;
+  r3[0U] = iv4;
+  r3[1U] = iv5;
+  r3[2U] = iv6;
+  r3[3U] = iv7;
+  uint64_t kk_shift_8 = (uint64_t)kk << 8U;
+  uint64_t iv0_ = iv0 ^ (0x01010000ULL ^ (kk_shift_8 ^ (uint64_t)nn));
+  r0[0U] = iv0_;
+  r0[1U] = iv1;
+  r0[2U] = iv2;
+  r0[3U] = iv3;
+  r1[0U] = iv4;
+  r1[1U] = iv5;
+  r1[2U] = iv6;
+  r1[3U] = iv7;
+}
+
+static void update_key(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, uint32_t ll)
+{
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
+  uint8_t b[128U] = { 0U };
+  memcpy(b, k, kk * sizeof (uint8_t));
+  if (ll == 0U)
+  {
+    update_block(wv, hash, true, lb, b);
+  }
+  else
+  {
+    update_block(wv, hash, false, lb, b);
+  }
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
+}
+
+void
+Hacl_Hash_Blake2b_update_multi(
+  uint32_t len,
+  uint64_t *wv,
+  uint64_t *hash,
+  FStar_UInt128_uint128 prev,
+  uint8_t *blocks,
+  uint32_t nb
+)
+{
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
+  {
+    FStar_UInt128_uint128
+    totlen =
+      FStar_UInt128_add_mod(prev,
+        FStar_UInt128_uint64_to_uint128((uint64_t)((i + 1U) * 128U)));
+    uint8_t *b = blocks + i * 128U;
+    update_block(wv, hash, false, totlen, b);
+  }
+}
+
+void
+Hacl_Hash_Blake2b_update_last(
+  uint32_t len,
+  uint64_t *wv,
+  uint64_t *hash,
+  FStar_UInt128_uint128 prev,
+  uint32_t rem,
+  uint8_t *d
+)
+{
+  uint8_t b[128U] = { 0U };
+  uint8_t *last = d + len - rem;
+  memcpy(b, last, rem * sizeof (uint8_t));
+  FStar_UInt128_uint128
+  totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len));
+  update_block(wv, hash, true, totlen, b);
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
+}
+
+static void
+update_blocks(
+  uint32_t len,
+  uint64_t *wv,
+  uint64_t *hash,
+  FStar_UInt128_uint128 prev,
+  uint8_t *blocks
+)
+{
+  uint32_t nb0 = len / 128U;
+  uint32_t rem0 = len % 128U;
+  uint32_t nb;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    nb = nb0 - 1U;
+  }
+  else
+  {
+    nb = nb0;
+  }
+  uint32_t rem;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    rem = 128U;
+  }
+  else
+  {
+    rem = rem0;
+  }
+  Hacl_Hash_Blake2b_update_multi(len, wv, hash, prev, blocks, nb);
+  Hacl_Hash_Blake2b_update_last(len, wv, hash, prev, rem, blocks);
+}
+
+static inline void
+update(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d)
+{
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
+  if (kk > 0U)
+  {
+    update_key(wv, hash, kk, k, ll);
+    if (!(ll == 0U))
+    {
+      update_blocks(ll, wv, hash, lb, d);
+      return;
+    }
+    return;
+  }
+  update_blocks(ll, wv, hash, FStar_UInt128_uint64_to_uint128((uint64_t)0U), d);
+}
+
+void Hacl_Hash_Blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash)
+{
+  uint8_t b[64U] = { 0U };
+  uint8_t *first = b;
+  uint8_t *second = b + 32U;
+  uint64_t *row0 = hash;
+  uint64_t *row1 = hash + 4U;
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(first + i * 8U, row0[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(second + i * 8U, row1[i]););
+  uint8_t *final = b;
+  memcpy(output, final, nn * sizeof (uint8_t));
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
+}
+
+/**
+  State allocation function when there is no key
+*/
+Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
+  uint64_t *b = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
+  Hacl_Hash_Blake2b_block_state_t block_state = { .fst = wv, .snd = b };
+  Hacl_Hash_Blake2b_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  Hacl_Hash_Blake2b_state_t
+  *p = (Hacl_Hash_Blake2b_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_state_t));
+  p[0U] = s;
+  Hacl_Hash_Blake2b_init(block_state.snd, 0U, 64U);
+  return p;
+}
+
+/**
+  Re-initialization function when there is no key
+*/
+void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *state)
+{
+  Hacl_Hash_Blake2b_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state;
+  Hacl_Hash_Blake2b_init(block_state.snd, 0U, 64U);
+  Hacl_Hash_Blake2b_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
+}
+
+/**
+  Update function when there is no key; 0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint32_t chunk_len)
+{
+  Hacl_Hash_Blake2b_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
+  {
+    sz = 128U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)128U);
+  }
+  if (chunk_len <= 128U - sz)
+  {
+    Hacl_Hash_Blake2b_state_t s1 = *state;
+    Hacl_Hash_Blake2b_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 128U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_Hash_Blake2b_state_t s1 = *state;
+    Hacl_Hash_Blake2b_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 128U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      uint64_t *wv = block_state1.fst;
+      uint64_t *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2b_update_multi(128U,
+        wv,
+        hash,
+        FStar_UInt128_uint64_to_uint128(prevlen),
+        buf,
+        nb);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)128U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 128U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)128U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    uint64_t *wv = block_state1.fst;
+    uint64_t *hash = block_state1.snd;
+    uint32_t nb = data1_len / 128U;
+    Hacl_Hash_Blake2b_update_multi(data1_len,
+      wv,
+      hash,
+      FStar_UInt128_uint64_to_uint128(total_len1),
+      data1,
+      nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 128U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Hash_Blake2b_state_t s1 = *state;
+    Hacl_Hash_Blake2b_block_state_t block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 128U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)128U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2
+        }
+      );
+    Hacl_Hash_Blake2b_state_t s10 = *state;
+    Hacl_Hash_Blake2b_block_state_t block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 128U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      uint64_t *wv = block_state1.fst;
+      uint64_t *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2b_update_multi(128U,
+        wv,
+        hash,
+        FStar_UInt128_uint64_to_uint128(prevlen),
+        buf,
+        nb);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)128U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 128U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)128U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    uint64_t *wv = block_state1.fst;
+    uint64_t *hash = block_state1.snd;
+    uint32_t nb = data1_len / 128U;
+    Hacl_Hash_Blake2b_update_multi(data1_len,
+      wv,
+      hash,
+      FStar_UInt128_uint64_to_uint128(total_len1),
+      data1,
+      nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
+
+/**
+  Finish function when there is no key
+*/
+void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output)
+{
+  Hacl_Hash_Blake2b_state_t scrut = *state;
+  Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint32_t r;
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
+  {
+    r = 128U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)128U);
+  }
+  uint8_t *buf_1 = buf_;
+  uint64_t wv0[16U] = { 0U };
+  uint64_t b[16U] = { 0U };
+  Hacl_Hash_Blake2b_block_state_t tmp_block_state = { .fst = wv0, .snd = b };
+  uint64_t *src_b = block_state.snd;
+  uint64_t *dst_b = tmp_block_state.snd;
+  memcpy(dst_b, src_b, 16U * sizeof (uint64_t));
+  uint64_t prev_len = total_len - (uint64_t)r;
+  uint32_t ite;
+  if (r % 128U == 0U && r > 0U)
+  {
+    ite = 128U;
+  }
+  else
+  {
+    ite = r % 128U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite;
+  uint8_t *buf_multi = buf_1;
+  uint64_t *wv1 = tmp_block_state.fst;
+  uint64_t *hash0 = tmp_block_state.snd;
+  uint32_t nb = 0U;
+  Hacl_Hash_Blake2b_update_multi(0U,
+    wv1,
+    hash0,
+    FStar_UInt128_uint64_to_uint128(prev_len),
+    buf_multi,
+    nb);
+  uint64_t prev_len_last = total_len - (uint64_t)r;
+  uint64_t *wv = tmp_block_state.fst;
+  uint64_t *hash = tmp_block_state.snd;
+  Hacl_Hash_Blake2b_update_last(r,
+    wv,
+    hash,
+    FStar_UInt128_uint64_to_uint128(prev_len_last),
+    r,
+    buf_last);
+  Hacl_Hash_Blake2b_finish(64U, output, tmp_block_state.snd);
+}
+
+/**
+  Free state function when there is no key
+*/
+void Hacl_Hash_Blake2b_free(Hacl_Hash_Blake2b_state_t *state)
+{
+  Hacl_Hash_Blake2b_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state;
+  uint64_t *wv = block_state.fst;
+  uint64_t *b = block_state.snd;
+  KRML_HOST_FREE(wv);
+  KRML_HOST_FREE(b);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
+
+/**
+Write the BLAKE2b digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 64.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
+void
+Hacl_Hash_Blake2b_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
+)
+{
+  uint64_t b[16U] = { 0U };
+  uint64_t b1[16U] = { 0U };
+  Hacl_Hash_Blake2b_init(b, key_len, output_len);
+  update(b1, b, key_len, key, input_len, input);
+  Hacl_Hash_Blake2b_finish(output_len, output, b);
+  Lib_Memzero0_memzero(b1, 16U, uint64_t);
+  Lib_Memzero0_memzero(b, 16U, uint64_t);
+}
+
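The new src/Hacl_Hash_Blake2b.c above provides both a one-shot entry point (Hacl_Hash_Blake2b_hash_with_key) and a keyless streaming API (Hacl_Hash_Blake2b_malloc / _update / _digest / _free). The sketch below (not part of the patch) shows how a caller would use both; the include path "Hacl_Hash_Blake2b.h" and the example function names are assumptions, while the called functions and their parameter order come from the definitions in this hunk.

    #include <stdint.h>
    #include <stddef.h>
    #include "Hacl_Hash_Blake2b.h"   /* assumed public header for the new API */

    /* One-shot: unkeyed, full 64-byte digest. */
    void example_hash(const uint8_t *msg, uint32_t msg_len, uint8_t digest[64])
    {
      Hacl_Hash_Blake2b_hash_with_key(digest, 64U, (uint8_t *)msg, msg_len,
                                      NULL, 0U);
    }

    /* Streaming: feed the message in two chunks, then extract the digest. */
    void example_stream(const uint8_t *p1, uint32_t l1,
                        const uint8_t *p2, uint32_t l2, uint8_t digest[64])
    {
      Hacl_Hash_Blake2b_state_t *st = Hacl_Hash_Blake2b_malloc();
      /* Each update returns Hacl_Streaming_Types_Success unless the running
         total would overflow 2^64 - 1 bytes; the result is ignored here. */
      (void)Hacl_Hash_Blake2b_update(st, (uint8_t *)p1, l1);
      (void)Hacl_Hash_Blake2b_update(st, (uint8_t *)p2, l2);
      Hacl_Hash_Blake2b_digest(st, digest); /* keyless state emits 64 bytes */
      Hacl_Hash_Blake2b_free(st);
    }
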
diff --git a/src/Hacl_Hash_Blake2b_256.c b/src/Hacl_Hash_Blake2b_256.c
deleted file mode 100644
index b37ffc5f..00000000
--- a/src/Hacl_Hash_Blake2b_256.c
+++ /dev/null
@@ -1,499 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Hash_Blake2b_256.h"
-
-#include "internal/Hacl_Impl_Blake2_Constants.h"
-#include "internal/Hacl_Hash_Blake2.h"
-#include "lib_memzero0.h"
-
-static inline void
-blake2b_update_block(
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  bool flag,
-  FStar_UInt128_uint128 totlen,
-  uint8_t *d
-)
-{
-  uint64_t m_w[16U] = { 0U };
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)8U;
-    uint64_t u = load64_le(bj);
-    uint64_t r = u;
-    uint64_t x = r;
-    os[i] = x;);
-  Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_zero;
-  uint64_t wv_14;
-  if (flag)
-  {
-    wv_14 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  }
-  else
-  {
-    wv_14 = (uint64_t)0U;
-  }
-  uint64_t wv_15 = (uint64_t)0U;
-  mask =
-    Lib_IntVector_Intrinsics_vec256_load64s(FStar_UInt128_uint128_to_uint64(totlen),
-      FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, (uint32_t)64U)),
-      wv_14,
-      wv_15);
-  memcpy(wv, hash, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Lib_IntVector_Intrinsics_vec256 *wv3 = wv + (uint32_t)3U;
-  wv3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv3[0U], mask);
-  KRML_MAYBE_FOR12(i,
-    (uint32_t)0U,
-    (uint32_t)12U,
-    (uint32_t)1U,
-    uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U;
-    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 m_st[4U] KRML_POST_ALIGN(32) = { 0U };
-    Lib_IntVector_Intrinsics_vec256 *r0 = m_st;
-    Lib_IntVector_Intrinsics_vec256 *r1 = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *r20 = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *r30 = m_st + (uint32_t)3U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
-    r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]);
-    r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]);
-    r20[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]);
-    r30[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]);
-    Lib_IntVector_Intrinsics_vec256 *x = m_st;
-    Lib_IntVector_Intrinsics_vec256 *y = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *z = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *w = m_st + (uint32_t)3U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec256 *wv_a0 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b0 = wv + b0 * (uint32_t)1U;
-    wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], wv_b0[0U]);
-    wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], x[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a1 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b1 = wv + a * (uint32_t)1U;
-    wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a1[0U], wv_b1[0U]);
-    wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a1[0U], (uint32_t)32U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a2 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b2 = wv + d10 * (uint32_t)1U;
-    wv_a2[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a2[0U], wv_b2[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a3 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b3 = wv + c0 * (uint32_t)1U;
-    wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a3[0U], wv_b3[0U]);
-    wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a3[0U], (uint32_t)24U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a4 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b4 = wv + b0 * (uint32_t)1U;
-    wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], wv_b4[0U]);
-    wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], y[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a5 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b5 = wv + a * (uint32_t)1U;
-    wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a5[0U], wv_b5[0U]);
-    wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a5[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a6 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b6 = wv + d10 * (uint32_t)1U;
-    wv_a6[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a6[0U], wv_b6[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a7 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b7 = wv + c0 * (uint32_t)1U;
-    wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a7[0U], wv_b7[0U]);
-    wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a7[0U], (uint32_t)63U);
-    Lib_IntVector_Intrinsics_vec256 *r10 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *r21 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *r31 = wv + (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec256 v00 = r10[0U];
-    Lib_IntVector_Intrinsics_vec256
-    v1 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v00, (uint32_t)1U);
-    r10[0U] = v1;
-    Lib_IntVector_Intrinsics_vec256 v01 = r21[0U];
-    Lib_IntVector_Intrinsics_vec256
-    v10 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v01, (uint32_t)2U);
-    r21[0U] = v10;
-    Lib_IntVector_Intrinsics_vec256 v02 = r31[0U];
-    Lib_IntVector_Intrinsics_vec256
-    v11 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v02, (uint32_t)3U);
-    r31[0U] = v11;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec256 *wv_a = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b8 = wv + b * (uint32_t)1U;
-    wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], wv_b8[0U]);
-    wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], z[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a8 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b9 = wv + a0 * (uint32_t)1U;
-    wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a8[0U], wv_b9[0U]);
-    wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a8[0U], (uint32_t)32U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a9 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b10 = wv + d1 * (uint32_t)1U;
-    wv_a9[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a9[0U], wv_b10[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a10 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b11 = wv + c * (uint32_t)1U;
-    wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a10[0U], wv_b11[0U]);
-    wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a10[0U], (uint32_t)24U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a11 = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b12 = wv + b * (uint32_t)1U;
-    wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], wv_b12[0U]);
-    wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], w[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a12 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b13 = wv + a0 * (uint32_t)1U;
-    wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a12[0U], wv_b13[0U]);
-    wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a12[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a13 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b14 = wv + d1 * (uint32_t)1U;
-    wv_a13[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a13[0U], wv_b14[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a14 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b = wv + c * (uint32_t)1U;
-    wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a14[0U], wv_b[0U]);
-    wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a14[0U], (uint32_t)63U);
-    Lib_IntVector_Intrinsics_vec256 *r11 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *r2 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *r3 = wv + (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec256 v0 = r11[0U];
-    Lib_IntVector_Intrinsics_vec256
-    v12 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v0, (uint32_t)3U);
-    r11[0U] = v12;
-    Lib_IntVector_Intrinsics_vec256 v03 = r2[0U];
-    Lib_IntVector_Intrinsics_vec256
-    v13 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v03, (uint32_t)2U);
-    r2[0U] = v13;
-    Lib_IntVector_Intrinsics_vec256 v04 = r3[0U];
-    Lib_IntVector_Intrinsics_vec256
-    v14 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v04, (uint32_t)1U);
-    r3[0U] = v14;);
-  Lib_IntVector_Intrinsics_vec256 *s0 = hash;
-  Lib_IntVector_Intrinsics_vec256 *s1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r0 = wv;
-  Lib_IntVector_Intrinsics_vec256 *r1 = wv + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = wv + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = wv + (uint32_t)3U;
-  s0[0U] = Lib_IntVector_Intrinsics_vec256_xor(s0[0U], r0[0U]);
-  s0[0U] = Lib_IntVector_Intrinsics_vec256_xor(s0[0U], r2[0U]);
-  s1[0U] = Lib_IntVector_Intrinsics_vec256_xor(s1[0U], r1[0U]);
-  s1[0U] = Lib_IntVector_Intrinsics_vec256_xor(s1[0U], r3[0U]);
-}
-
-void
-Hacl_Blake2b_256_blake2b_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, uint32_t nn)
-{
-  Lib_IntVector_Intrinsics_vec256 *r0 = hash;
-  Lib_IntVector_Intrinsics_vec256 *r1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = hash + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = hash + (uint32_t)3U;
-  uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U];
-  uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U];
-  uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U];
-  uint64_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_B[3U];
-  uint64_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_B[4U];
-  uint64_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_B[5U];
-  uint64_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_B[6U];
-  uint64_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_B[7U];
-  r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3);
-  r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7);
-  uint64_t kk_shift_8 = (uint64_t)kk << (uint32_t)8U;
-  uint64_t iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)nn));
-  r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1, iv2, iv3);
-  r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7);
-}
-
-void
-Hacl_Blake2b_256_blake2b_update_key(
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-)
-{
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
-  uint8_t b[128U] = { 0U };
-  memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
-  {
-    blake2b_update_block(wv, hash, true, lb, b);
-  }
-  else
-  {
-    blake2b_update_block(wv, hash, false, lb, b);
-  }
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
-}
-
-void
-Hacl_Blake2b_256_blake2b_update_multi(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  FStar_UInt128_uint128 prev,
-  uint8_t *blocks,
-  uint32_t nb
-)
-{
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
-  {
-    FStar_UInt128_uint128
-    totlen =
-      FStar_UInt128_add_mod(prev,
-        FStar_UInt128_uint64_to_uint128((uint64_t)((i + (uint32_t)1U) * (uint32_t)128U)));
-    uint8_t *b = blocks + i * (uint32_t)128U;
-    blake2b_update_block(wv, hash, false, totlen, b);
-  }
-}
-
-void
-Hacl_Blake2b_256_blake2b_update_last(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  FStar_UInt128_uint128 prev,
-  uint32_t rem,
-  uint8_t *d
-)
-{
-  uint8_t b[128U] = { 0U };
-  uint8_t *last = d + len - rem;
-  memcpy(b, last, rem * sizeof (uint8_t));
-  FStar_UInt128_uint128
-  totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len));
-  blake2b_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
-}
-
-static inline void
-blake2b_update_blocks(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  FStar_UInt128_uint128 prev,
-  uint8_t *blocks
-)
-{
-  uint32_t nb0 = len / (uint32_t)128U;
-  uint32_t rem0 = len % (uint32_t)128U;
-  K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
-  {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)128U;
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
-  }
-  else
-  {
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb0, .snd = rem0 });
-  }
-  uint32_t nb = scrut.fst;
-  uint32_t rem = scrut.snd;
-  Hacl_Blake2b_256_blake2b_update_multi(len, wv, hash, prev, blocks, nb);
-  Hacl_Blake2b_256_blake2b_update_last(len, wv, hash, prev, rem, blocks);
-}
-
-static inline void
-blake2b_update(
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll,
-  uint8_t *d
-)
-{
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
-  if (kk > (uint32_t)0U)
-  {
-    Hacl_Blake2b_256_blake2b_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
-    {
-      blake2b_update_blocks(ll, wv, hash, lb, d);
-      return;
-    }
-    return;
-  }
-  blake2b_update_blocks(ll,
-    wv,
-    hash,
-    FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)0U),
-    d);
-}
-
-void
-Hacl_Blake2b_256_blake2b_finish(
-  uint32_t nn,
-  uint8_t *output,
-  Lib_IntVector_Intrinsics_vec256 *hash
-)
-{
-  uint8_t b[64U] = { 0U };
-  uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)32U;
-  Lib_IntVector_Intrinsics_vec256 *row0 = hash;
-  Lib_IntVector_Intrinsics_vec256 *row1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256_store64_le(first, row0[0U]);
-  Lib_IntVector_Intrinsics_vec256_store64_le(second, row1[0U]);
-  uint8_t *final = b;
-  memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
-}
-
-/**
-Write the BLAKE2b digest of message `d` using key `k` into `output`.
-
-@param nn Length of the to-be-generated digest with 1 <= `nn` <= 64.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2b_256_blake2b(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-)
-{
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U };
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b1[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_init(b, kk, nn);
-  blake2b_update(b1, b, kk, k, ll, d);
-  Hacl_Blake2b_256_blake2b_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)4U, Lib_IntVector_Intrinsics_vec256);
-  Lib_Memzero0_memzero(b, (uint32_t)4U, Lib_IntVector_Intrinsics_vec256);
-}
-
-void
-Hacl_Blake2b_256_load_state256b_from_state32(
-  Lib_IntVector_Intrinsics_vec256 *st,
-  uint64_t *st32
-)
-{
-  Lib_IntVector_Intrinsics_vec256 *r0 = st;
-  Lib_IntVector_Intrinsics_vec256 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = st + (uint32_t)3U;
-  uint64_t *b0 = st32;
-  uint64_t *b1 = st32 + (uint32_t)4U;
-  uint64_t *b2 = st32 + (uint32_t)8U;
-  uint64_t *b3 = st32 + (uint32_t)12U;
-  r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b0[0U], b0[1U], b0[2U], b0[3U]);
-  r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b1[0U], b1[1U], b1[2U], b1[3U]);
-  r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b2[0U], b2[1U], b2[2U], b2[3U]);
-  r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b3[0U], b3[1U], b3[2U], b3[3U]);
-}
-
-void
-Hacl_Blake2b_256_store_state256b_to_state32(
-  uint64_t *st32,
-  Lib_IntVector_Intrinsics_vec256 *st
-)
-{
-  Lib_IntVector_Intrinsics_vec256 *r0 = st;
-  Lib_IntVector_Intrinsics_vec256 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = st + (uint32_t)3U;
-  uint64_t *b0 = st32;
-  uint64_t *b1 = st32 + (uint32_t)4U;
-  uint64_t *b2 = st32 + (uint32_t)8U;
-  uint64_t *b3 = st32 + (uint32_t)12U;
-  uint8_t b8[32U] = { 0U };
-  Lib_IntVector_Intrinsics_vec256_store64_le(b8, r0[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = b0;
-    uint8_t *bj = b8 + i * (uint32_t)8U;
-    uint64_t u = load64_le(bj);
-    uint64_t r = u;
-    uint64_t x = r;
-    os[i] = x;);
-  uint8_t b80[32U] = { 0U };
-  Lib_IntVector_Intrinsics_vec256_store64_le(b80, r1[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = b1;
-    uint8_t *bj = b80 + i * (uint32_t)8U;
-    uint64_t u = load64_le(bj);
-    uint64_t r = u;
-    uint64_t x = r;
-    os[i] = x;);
-  uint8_t b81[32U] = { 0U };
-  Lib_IntVector_Intrinsics_vec256_store64_le(b81, r2[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = b2;
-    uint8_t *bj = b81 + i * (uint32_t)8U;
-    uint64_t u = load64_le(bj);
-    uint64_t r = u;
-    uint64_t x = r;
-    os[i] = x;);
-  uint8_t b82[32U] = { 0U };
-  Lib_IntVector_Intrinsics_vec256_store64_le(b82, r3[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = b3;
-    uint8_t *bj = b82 + i * (uint32_t)8U;
-    uint64_t u = load64_le(bj);
-    uint64_t r = u;
-    uint64_t x = r;
-    os[i] = x;);
-}
-
-Lib_IntVector_Intrinsics_vec256 *Hacl_Blake2b_256_blake2b_malloc(void)
-{
-  Lib_IntVector_Intrinsics_vec256
-  *buf =
-    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U);
-  memset(buf, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  return buf;
-}
-
diff --git a/src/Hacl_Hash_Blake2b_Simd256.c b/src/Hacl_Hash_Blake2b_Simd256.c
new file mode 100644
index 00000000..1a5e8cf2
--- /dev/null
+++ b/src/Hacl_Hash_Blake2b_Simd256.c
@@ -0,0 +1,828 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_Hash_Blake2b_Simd256.h"
+
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "lib_memzero0.h"
+
+static inline void
+update_block(
+  Lib_IntVector_Intrinsics_vec256 *wv,
+  Lib_IntVector_Intrinsics_vec256 *hash,
+  bool flag,
+  FStar_UInt128_uint128 totlen,
+  uint8_t *d
+)
+{
+  uint64_t m_w[16U] = { 0U };
+  KRML_MAYBE_FOR16(i,
+    0U,
+    16U,
+    1U,
+    uint64_t *os = m_w;
+    uint8_t *bj = d + i * 8U;
+    uint64_t u = load64_le(bj);
+    uint64_t r = u;
+    uint64_t x = r;
+    os[i] = x;);
+  Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_zero;
+  uint64_t wv_14;
+  if (flag)
+  {
+    wv_14 = 0xFFFFFFFFFFFFFFFFULL;
+  }
+  else
+  {
+    wv_14 = 0ULL;
+  }
+  uint64_t wv_15 = 0ULL;
+  mask =
+    Lib_IntVector_Intrinsics_vec256_load64s(FStar_UInt128_uint128_to_uint64(totlen),
+      FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, 64U)),
+      wv_14,
+      wv_15);
+  memcpy(wv, hash, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Lib_IntVector_Intrinsics_vec256 *wv3 = wv + 3U;
+  wv3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv3[0U], mask);
+  KRML_MAYBE_FOR12(i,
+    0U,
+    12U,
+    1U,
+    uint32_t start_idx = i % 10U * 16U;
+    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 m_st[4U] KRML_POST_ALIGN(32) = { 0U };
+    Lib_IntVector_Intrinsics_vec256 *r0 = m_st;
+    Lib_IntVector_Intrinsics_vec256 *r1 = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec256 *r20 = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec256 *r30 = m_st + 3U;
+    uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U];
+    r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]);
+    r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]);
+    r20[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]);
+    r30[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]);
+    Lib_IntVector_Intrinsics_vec256 *x = m_st;
+    Lib_IntVector_Intrinsics_vec256 *y = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec256 *z = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec256 *w = m_st + 3U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a0 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b0 = wv + b0 * 1U;
+    wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], wv_b0[0U]);
+    wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], x[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a1 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b1 = wv + a * 1U;
+    wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a1[0U], wv_b1[0U]);
+    wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a1[0U], 32U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a2 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b2 = wv + d10 * 1U;
+    wv_a2[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a2[0U], wv_b2[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a3 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b3 = wv + c0 * 1U;
+    wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a3[0U], wv_b3[0U]);
+    wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a3[0U], 24U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a4 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b4 = wv + b0 * 1U;
+    wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], wv_b4[0U]);
+    wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], y[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a5 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b5 = wv + a * 1U;
+    wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a5[0U], wv_b5[0U]);
+    wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a5[0U], 16U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a6 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b6 = wv + d10 * 1U;
+    wv_a6[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a6[0U], wv_b6[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a7 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b7 = wv + c0 * 1U;
+    wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a7[0U], wv_b7[0U]);
+    wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a7[0U], 63U);
+    Lib_IntVector_Intrinsics_vec256 *r10 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec256 *r21 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec256 *r31 = wv + 3U;
+    Lib_IntVector_Intrinsics_vec256 v00 = r10[0U];
+    Lib_IntVector_Intrinsics_vec256
+    v1 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v00, 1U);
+    r10[0U] = v1;
+    Lib_IntVector_Intrinsics_vec256 v01 = r21[0U];
+    Lib_IntVector_Intrinsics_vec256
+    v10 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v01, 2U);
+    r21[0U] = v10;
+    Lib_IntVector_Intrinsics_vec256 v02 = r31[0U];
+    Lib_IntVector_Intrinsics_vec256
+    v11 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v02, 3U);
+    r31[0U] = v11;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b8 = wv + b * 1U;
+    wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], wv_b8[0U]);
+    wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], z[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a8 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b9 = wv + a0 * 1U;
+    wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a8[0U], wv_b9[0U]);
+    wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a8[0U], 32U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a9 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b10 = wv + d1 * 1U;
+    wv_a9[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a9[0U], wv_b10[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a10 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b11 = wv + c * 1U;
+    wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a10[0U], wv_b11[0U]);
+    wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a10[0U], 24U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a11 = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b12 = wv + b * 1U;
+    wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], wv_b12[0U]);
+    wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], w[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a12 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b13 = wv + a0 * 1U;
+    wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a12[0U], wv_b13[0U]);
+    wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a12[0U], 16U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a13 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b14 = wv + d1 * 1U;
+    wv_a13[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a13[0U], wv_b14[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a14 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b = wv + c * 1U;
+    wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a14[0U], wv_b[0U]);
+    wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a14[0U], 63U);
+    Lib_IntVector_Intrinsics_vec256 *r11 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec256 *r2 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec256 *r3 = wv + 3U;
+    Lib_IntVector_Intrinsics_vec256 v0 = r11[0U];
+    Lib_IntVector_Intrinsics_vec256
+    v12 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v0, 3U);
+    r11[0U] = v12;
+    Lib_IntVector_Intrinsics_vec256 v03 = r2[0U];
+    Lib_IntVector_Intrinsics_vec256
+    v13 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v03, 2U);
+    r2[0U] = v13;
+    Lib_IntVector_Intrinsics_vec256 v04 = r3[0U];
+    Lib_IntVector_Intrinsics_vec256
+    v14 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v04, 1U);
+    r3[0U] = v14;);
+  Lib_IntVector_Intrinsics_vec256 *s0 = hash;
+  Lib_IntVector_Intrinsics_vec256 *s1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r0 = wv;
+  Lib_IntVector_Intrinsics_vec256 *r1 = wv + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = wv + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = wv + 3U;
+  s0[0U] = Lib_IntVector_Intrinsics_vec256_xor(s0[0U], r0[0U]);
+  s0[0U] = Lib_IntVector_Intrinsics_vec256_xor(s0[0U], r2[0U]);
+  s1[0U] = Lib_IntVector_Intrinsics_vec256_xor(s1[0U], r1[0U]);
+  s1[0U] = Lib_IntVector_Intrinsics_vec256_xor(s1[0U], r3[0U]);
+}
+
+void
+Hacl_Hash_Blake2b_Simd256_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, uint32_t nn)
+{
+  Lib_IntVector_Intrinsics_vec256 *r0 = hash;
+  Lib_IntVector_Intrinsics_vec256 *r1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = hash + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = hash + 3U;
+  uint64_t iv0 = Hacl_Hash_Blake2s_ivTable_B[0U];
+  uint64_t iv1 = Hacl_Hash_Blake2s_ivTable_B[1U];
+  uint64_t iv2 = Hacl_Hash_Blake2s_ivTable_B[2U];
+  uint64_t iv3 = Hacl_Hash_Blake2s_ivTable_B[3U];
+  uint64_t iv4 = Hacl_Hash_Blake2s_ivTable_B[4U];
+  uint64_t iv5 = Hacl_Hash_Blake2s_ivTable_B[5U];
+  uint64_t iv6 = Hacl_Hash_Blake2s_ivTable_B[6U];
+  uint64_t iv7 = Hacl_Hash_Blake2s_ivTable_B[7U];
+  r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3);
+  r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7);
+  uint64_t kk_shift_8 = (uint64_t)kk << 8U;
+  uint64_t iv0_ = iv0 ^ (0x01010000ULL ^ (kk_shift_8 ^ (uint64_t)nn));
+  r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1, iv2, iv3);
+  r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7);
+}
+
+static void
+update_key(
+  Lib_IntVector_Intrinsics_vec256 *wv,
+  Lib_IntVector_Intrinsics_vec256 *hash,
+  uint32_t kk,
+  uint8_t *k,
+  uint32_t ll
+)
+{
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
+  uint8_t b[128U] = { 0U };
+  memcpy(b, k, kk * sizeof (uint8_t));
+  if (ll == 0U)
+  {
+    update_block(wv, hash, true, lb, b);
+  }
+  else
+  {
+    update_block(wv, hash, false, lb, b);
+  }
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
+}
+
+void
+Hacl_Hash_Blake2b_Simd256_update_multi(
+  uint32_t len,
+  Lib_IntVector_Intrinsics_vec256 *wv,
+  Lib_IntVector_Intrinsics_vec256 *hash,
+  FStar_UInt128_uint128 prev,
+  uint8_t *blocks,
+  uint32_t nb
+)
+{
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
+  {
+    FStar_UInt128_uint128
+    totlen =
+      FStar_UInt128_add_mod(prev,
+        FStar_UInt128_uint64_to_uint128((uint64_t)((i + 1U) * 128U)));
+    uint8_t *b = blocks + i * 128U;
+    update_block(wv, hash, false, totlen, b);
+  }
+}
+
+void
+Hacl_Hash_Blake2b_Simd256_update_last(
+  uint32_t len,
+  Lib_IntVector_Intrinsics_vec256 *wv,
+  Lib_IntVector_Intrinsics_vec256 *hash,
+  FStar_UInt128_uint128 prev,
+  uint32_t rem,
+  uint8_t *d
+)
+{
+  uint8_t b[128U] = { 0U };
+  uint8_t *last = d + len - rem;
+  memcpy(b, last, rem * sizeof (uint8_t));
+  FStar_UInt128_uint128
+  totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len));
+  update_block(wv, hash, true, totlen, b);
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
+}
+
+static inline void
+update_blocks(
+  uint32_t len,
+  Lib_IntVector_Intrinsics_vec256 *wv,
+  Lib_IntVector_Intrinsics_vec256 *hash,
+  FStar_UInt128_uint128 prev,
+  uint8_t *blocks
+)
+{
+  uint32_t nb0 = len / 128U;
+  uint32_t rem0 = len % 128U;
+  uint32_t nb;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    nb = nb0 - 1U;
+  }
+  else
+  {
+    nb = nb0;
+  }
+  uint32_t rem;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    rem = 128U;
+  }
+  else
+  {
+    rem = rem0;
+  }
+  Hacl_Hash_Blake2b_Simd256_update_multi(len, wv, hash, prev, blocks, nb);
+  Hacl_Hash_Blake2b_Simd256_update_last(len, wv, hash, prev, rem, blocks);
+}
+
+static inline void
+update(
+  Lib_IntVector_Intrinsics_vec256 *wv,
+  Lib_IntVector_Intrinsics_vec256 *hash,
+  uint32_t kk,
+  uint8_t *k,
+  uint32_t ll,
+  uint8_t *d
+)
+{
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
+  if (kk > 0U)
+  {
+    update_key(wv, hash, kk, k, ll);
+    if (!(ll == 0U))
+    {
+      update_blocks(ll, wv, hash, lb, d);
+      return;
+    }
+    return;
+  }
+  update_blocks(ll, wv, hash, FStar_UInt128_uint64_to_uint128((uint64_t)0U), d);
+}
+
+void
+Hacl_Hash_Blake2b_Simd256_finish(
+  uint32_t nn,
+  uint8_t *output,
+  Lib_IntVector_Intrinsics_vec256 *hash
+)
+{
+  uint8_t b[64U] = { 0U };
+  uint8_t *first = b;
+  uint8_t *second = b + 32U;
+  Lib_IntVector_Intrinsics_vec256 *row0 = hash;
+  Lib_IntVector_Intrinsics_vec256 *row1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec256_store64_le(first, row0[0U]);
+  Lib_IntVector_Intrinsics_vec256_store64_le(second, row1[0U]);
+  uint8_t *final = b;
+  memcpy(output, final, nn * sizeof (uint8_t));
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
+}
+
+void
+Hacl_Hash_Blake2b_Simd256_load_state256b_from_state32(
+  Lib_IntVector_Intrinsics_vec256 *st,
+  uint64_t *st32
+)
+{
+  Lib_IntVector_Intrinsics_vec256 *r0 = st;
+  Lib_IntVector_Intrinsics_vec256 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = st + 3U;
+  uint64_t *b0 = st32;
+  uint64_t *b1 = st32 + 4U;
+  uint64_t *b2 = st32 + 8U;
+  uint64_t *b3 = st32 + 12U;
+  r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b0[0U], b0[1U], b0[2U], b0[3U]);
+  r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b1[0U], b1[1U], b1[2U], b1[3U]);
+  r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b2[0U], b2[1U], b2[2U], b2[3U]);
+  r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b3[0U], b3[1U], b3[2U], b3[3U]);
+}
+
+void
+Hacl_Hash_Blake2b_Simd256_store_state256b_to_state32(
+  uint64_t *st32,
+  Lib_IntVector_Intrinsics_vec256 *st
+)
+{
+  Lib_IntVector_Intrinsics_vec256 *r0 = st;
+  Lib_IntVector_Intrinsics_vec256 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = st + 3U;
+  uint64_t *b0 = st32;
+  uint64_t *b1 = st32 + 4U;
+  uint64_t *b2 = st32 + 8U;
+  uint64_t *b3 = st32 + 12U;
+  uint8_t b8[32U] = { 0U };
+  Lib_IntVector_Intrinsics_vec256_store64_le(b8, r0[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = b0;
+    uint8_t *bj = b8 + i * 8U;
+    uint64_t u = load64_le(bj);
+    uint64_t r = u;
+    uint64_t x = r;
+    os[i] = x;);
+  uint8_t b80[32U] = { 0U };
+  Lib_IntVector_Intrinsics_vec256_store64_le(b80, r1[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = b1;
+    uint8_t *bj = b80 + i * 8U;
+    uint64_t u = load64_le(bj);
+    uint64_t r = u;
+    uint64_t x = r;
+    os[i] = x;);
+  uint8_t b81[32U] = { 0U };
+  Lib_IntVector_Intrinsics_vec256_store64_le(b81, r2[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = b2;
+    uint8_t *bj = b81 + i * 8U;
+    uint64_t u = load64_le(bj);
+    uint64_t r = u;
+    uint64_t x = r;
+    os[i] = x;);
+  uint8_t b82[32U] = { 0U };
+  Lib_IntVector_Intrinsics_vec256_store64_le(b82, r3[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = b3;
+    uint8_t *bj = b82 + i * 8U;
+    uint64_t u = load64_le(bj);
+    uint64_t r = u;
+    uint64_t x = r;
+    os[i] = x;);
+}
+
+Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_Blake2b_Simd256_malloc_with_key(void)
+{
+  Lib_IntVector_Intrinsics_vec256
+  *buf =
+    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
+  memset(buf, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  return buf;
+}
+
+/**
+  State allocation function when there is no key (the digest length defaults to 64 bytes)
+*/
+Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  Lib_IntVector_Intrinsics_vec256
+  *wv =
+    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
+  memset(wv, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Lib_IntVector_Intrinsics_vec256
+  *b =
+    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
+  memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Hacl_Hash_Blake2b_Simd256_block_state_t block_state = { .fst = wv, .snd = b };
+  Hacl_Hash_Blake2b_Simd256_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  Hacl_Hash_Blake2b_Simd256_state_t
+  *p =
+    (Hacl_Hash_Blake2b_Simd256_state_t *)KRML_HOST_MALLOC(sizeof (
+        Hacl_Hash_Blake2b_Simd256_state_t
+      ));
+  p[0U] = s;
+  Hacl_Hash_Blake2b_Simd256_init(block_state.snd, 0U, 64U);
+  return p;
+}
+
+/**
+  Re-initialization function when there is no key
+*/
+void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *state)
+{
+  Hacl_Hash_Blake2b_Simd256_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state;
+  Hacl_Hash_Blake2b_Simd256_init(block_state.snd, 0U, 64U);
+  Hacl_Hash_Blake2b_Simd256_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
+}
+
+/**
+  Update function when there is no key; returns Hacl_Streaming_Types_Success on success
+  and Hacl_Streaming_Types_MaximumLengthExceeded if the total input length would exceed
+  2^64 - 1 bytes
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Hash_Blake2b_Simd256_update(
+  Hacl_Hash_Blake2b_Simd256_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
+)
+{
+  Hacl_Hash_Blake2b_Simd256_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
+  {
+    sz = 128U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)128U);
+  }
+  if (chunk_len <= 128U - sz)
+  {
+    Hacl_Hash_Blake2b_Simd256_state_t s1 = *state;
+    Hacl_Hash_Blake2b_Simd256_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 128U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_Simd256_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_Hash_Blake2b_Simd256_state_t s1 = *state;
+    Hacl_Hash_Blake2b_Simd256_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 128U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
+      Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2b_Simd256_update_multi(128U,
+        wv,
+        hash,
+        FStar_UInt128_uint64_to_uint128(prevlen),
+        buf,
+        nb);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)128U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 128U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)128U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
+    Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
+    uint32_t nb = data1_len / 128U;
+    Hacl_Hash_Blake2b_Simd256_update_multi(data1_len,
+      wv,
+      hash,
+      FStar_UInt128_uint64_to_uint128(total_len1),
+      data1,
+      nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_Simd256_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 128U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Hash_Blake2b_Simd256_state_t s1 = *state;
+    Hacl_Hash_Blake2b_Simd256_block_state_t block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 128U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)128U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_Simd256_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2
+        }
+      );
+    Hacl_Hash_Blake2b_Simd256_state_t s10 = *state;
+    Hacl_Hash_Blake2b_Simd256_block_state_t block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 128U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
+      Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2b_Simd256_update_multi(128U,
+        wv,
+        hash,
+        FStar_UInt128_uint64_to_uint128(prevlen),
+        buf,
+        nb);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)128U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 128U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)128U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
+    Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
+    uint32_t nb = data1_len / 128U;
+    Hacl_Hash_Blake2b_Simd256_update_multi(data1_len,
+      wv,
+      hash,
+      FStar_UInt128_uint64_to_uint128(total_len1),
+      data1,
+      nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_Simd256_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
+
+/**
+  Finish function when there is no key: writes the current 64-byte digest into `output`
+  and leaves the streaming state unmodified, so more input can still be fed afterwards
+*/
+void
+Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8_t *output)
+{
+  Hacl_Hash_Blake2b_Simd256_state_t scrut = *state;
+  Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint32_t r;
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
+  {
+    r = 128U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)128U);
+  }
+  uint8_t *buf_1 = buf_;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U };
+  Hacl_Hash_Blake2b_Simd256_block_state_t tmp_block_state = { .fst = wv0, .snd = b };
+  Lib_IntVector_Intrinsics_vec256 *src_b = block_state.snd;
+  Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.snd;
+  memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  uint64_t prev_len = total_len - (uint64_t)r;
+  uint32_t ite;
+  if (r % 128U == 0U && r > 0U)
+  {
+    ite = 128U;
+  }
+  else
+  {
+    ite = r % 128U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite;
+  uint8_t *buf_multi = buf_1;
+  Lib_IntVector_Intrinsics_vec256 *wv1 = tmp_block_state.fst;
+  Lib_IntVector_Intrinsics_vec256 *hash0 = tmp_block_state.snd;
+  uint32_t nb = 0U;
+  Hacl_Hash_Blake2b_Simd256_update_multi(0U,
+    wv1,
+    hash0,
+    FStar_UInt128_uint64_to_uint128(prev_len),
+    buf_multi,
+    nb);
+  uint64_t prev_len_last = total_len - (uint64_t)r;
+  Lib_IntVector_Intrinsics_vec256 *wv = tmp_block_state.fst;
+  Lib_IntVector_Intrinsics_vec256 *hash = tmp_block_state.snd;
+  Hacl_Hash_Blake2b_Simd256_update_last(r,
+    wv,
+    hash,
+    FStar_UInt128_uint64_to_uint128(prev_len_last),
+    r,
+    buf_last);
+  Hacl_Hash_Blake2b_Simd256_finish(64U, output, tmp_block_state.snd);
+}
+
+/**
+  Free state function when there is no key
+*/
+void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state)
+{
+  Hacl_Hash_Blake2b_Simd256_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state;
+  Lib_IntVector_Intrinsics_vec256 *wv = block_state.fst;
+  Lib_IntVector_Intrinsics_vec256 *b = block_state.snd;
+  KRML_ALIGNED_FREE(wv);
+  KRML_ALIGNED_FREE(b);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
+
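+/*
+ * Illustrative usage sketch for the keyless streaming API above
+ * (malloc / update / digest / free); not part of the generated code. It uses
+ * only declarations already in scope in this translation unit; the two-chunk
+ * split of the caller's data is an arbitrary example.
+ */
+void
+example_blake2b_simd256_streaming(
+  uint8_t *part1,
+  uint32_t len1,
+  uint8_t *part2,
+  uint32_t len2,
+  uint8_t *digest64
+)
+{
+  Hacl_Hash_Blake2b_Simd256_state_t *st = Hacl_Hash_Blake2b_Simd256_malloc();
+  /* Each update returns Hacl_Streaming_Types_Success unless the total input
+     length would overflow 2^64 - 1 bytes; check the code on every call. */
+  if
+  (
+    Hacl_Hash_Blake2b_Simd256_update(st, part1, len1) == Hacl_Streaming_Types_Success
+    && Hacl_Hash_Blake2b_Simd256_update(st, part2, len2) == Hacl_Streaming_Types_Success
+  )
+  {
+    /* digest writes the current 64-byte value into digest64 and leaves the
+       state usable, so more chunks could still be fed afterwards. */
+    Hacl_Hash_Blake2b_Simd256_digest(st, digest64);
+  }
+  Hacl_Hash_Blake2b_Simd256_free(st);
+}
+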
+/**
+Write the BLAKE2b digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written.
+@param output_len Length of the digest to be generated; must satisfy 1 <= `output_len` <= 64.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
+void
+Hacl_Hash_Blake2b_Simd256_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
+)
+{
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U };
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b1[4U] KRML_POST_ALIGN(32) = { 0U };
+  Hacl_Hash_Blake2b_Simd256_init(b, key_len, output_len);
+  update(b1, b, key_len, key, input_len, input);
+  Hacl_Hash_Blake2b_Simd256_finish(output_len, output, b);
+  Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec256);
+  Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec256);
+}
+
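+/*
+ * Illustrative usage sketch for the one-shot API above; not part of the
+ * generated code. The 32-byte key and 32-byte digest length are arbitrary
+ * example choices; output_len must satisfy 1 <= output_len <= 64 and
+ * key_len may be 0 for unkeyed hashing.
+ */
+void example_blake2b_simd256_hash_with_key(uint8_t *msg, uint32_t msg_len, uint8_t *digest32)
+{
+  uint8_t key[32U] = { 0U };  /* replace with the caller's key material */
+  Hacl_Hash_Blake2b_Simd256_hash_with_key(digest32, 32U, msg, msg_len, key, 32U);
+}
+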
diff --git a/src/Hacl_Hash_Blake2s.c b/src/Hacl_Hash_Blake2s.c
new file mode 100644
index 00000000..652c3f33
--- /dev/null
+++ b/src/Hacl_Hash_Blake2s.c
@@ -0,0 +1,931 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_Hash_Blake2s.h"
+
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "lib_memzero0.h"
+
+static inline void
+update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t *d)
+{
+  uint32_t m_w[16U] = { 0U };
+  KRML_MAYBE_FOR16(i,
+    0U,
+    16U,
+    1U,
+    uint32_t *os = m_w;
+    uint8_t *bj = d + i * 4U;
+    uint32_t u = load32_le(bj);
+    uint32_t r = u;
+    uint32_t x = r;
+    os[i] = x;);
+  uint32_t mask[4U] = { 0U };
+  uint32_t wv_14;
+  if (flag)
+  {
+    wv_14 = 0xFFFFFFFFU;
+  }
+  else
+  {
+    wv_14 = 0U;
+  }
+  uint32_t wv_15 = 0U;
+  mask[0U] = (uint32_t)totlen;
+  mask[1U] = (uint32_t)(totlen >> 32U);
+  mask[2U] = wv_14;
+  mask[3U] = wv_15;
+  memcpy(wv, hash, 16U * sizeof (uint32_t));
+  uint32_t *wv3 = wv + 12U;
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = wv3;
+    uint32_t x = wv3[i] ^ mask[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR10(i0,
+    0U,
+    10U,
+    1U,
+    uint32_t start_idx = i0 % 10U * 16U;
+    uint32_t m_st[16U] = { 0U };
+    uint32_t *r0 = m_st;
+    uint32_t *r1 = m_st + 4U;
+    uint32_t *r20 = m_st + 8U;
+    uint32_t *r30 = m_st + 12U;
+    uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U];
+    uint32_t uu____0 = m_w[s2];
+    uint32_t uu____1 = m_w[s4];
+    uint32_t uu____2 = m_w[s6];
+    r0[0U] = m_w[s0];
+    r0[1U] = uu____0;
+    r0[2U] = uu____1;
+    r0[3U] = uu____2;
+    uint32_t uu____3 = m_w[s3];
+    uint32_t uu____4 = m_w[s5];
+    uint32_t uu____5 = m_w[s7];
+    r1[0U] = m_w[s1];
+    r1[1U] = uu____3;
+    r1[2U] = uu____4;
+    r1[3U] = uu____5;
+    uint32_t uu____6 = m_w[s10];
+    uint32_t uu____7 = m_w[s12];
+    uint32_t uu____8 = m_w[s14];
+    r20[0U] = m_w[s8];
+    r20[1U] = uu____6;
+    r20[2U] = uu____7;
+    r20[3U] = uu____8;
+    uint32_t uu____9 = m_w[s11];
+    uint32_t uu____10 = m_w[s13];
+    uint32_t uu____11 = m_w[s15];
+    r30[0U] = m_w[s9];
+    r30[1U] = uu____9;
+    r30[2U] = uu____10;
+    r30[3U] = uu____11;
+    uint32_t *x = m_st;
+    uint32_t *y = m_st + 4U;
+    uint32_t *z = m_st + 8U;
+    uint32_t *w = m_st + 12U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    uint32_t *wv_a0 = wv + a * 4U;
+    uint32_t *wv_b0 = wv + b0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a0;
+      uint32_t x1 = wv_a0[i] + wv_b0[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a0;
+      uint32_t x1 = wv_a0[i] + x[i];
+      os[i] = x1;);
+    uint32_t *wv_a1 = wv + d10 * 4U;
+    uint32_t *wv_b1 = wv + a * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a1;
+      uint32_t x1 = wv_a1[i] ^ wv_b1[i];
+      os[i] = x1;);
+    uint32_t *r10 = wv_a1;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r10;
+      uint32_t x1 = r10[i];
+      uint32_t x10 = x1 >> 16U | x1 << 16U;
+      os[i] = x10;);
+    uint32_t *wv_a2 = wv + c0 * 4U;
+    uint32_t *wv_b2 = wv + d10 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a2;
+      uint32_t x1 = wv_a2[i] + wv_b2[i];
+      os[i] = x1;);
+    uint32_t *wv_a3 = wv + b0 * 4U;
+    uint32_t *wv_b3 = wv + c0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a3;
+      uint32_t x1 = wv_a3[i] ^ wv_b3[i];
+      os[i] = x1;);
+    uint32_t *r12 = wv_a3;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r12;
+      uint32_t x1 = r12[i];
+      uint32_t x10 = x1 >> 12U | x1 << 20U;
+      os[i] = x10;);
+    uint32_t *wv_a4 = wv + a * 4U;
+    uint32_t *wv_b4 = wv + b0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a4;
+      uint32_t x1 = wv_a4[i] + wv_b4[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a4;
+      uint32_t x1 = wv_a4[i] + y[i];
+      os[i] = x1;);
+    uint32_t *wv_a5 = wv + d10 * 4U;
+    uint32_t *wv_b5 = wv + a * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a5;
+      uint32_t x1 = wv_a5[i] ^ wv_b5[i];
+      os[i] = x1;);
+    uint32_t *r13 = wv_a5;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r13;
+      uint32_t x1 = r13[i];
+      uint32_t x10 = x1 >> 8U | x1 << 24U;
+      os[i] = x10;);
+    uint32_t *wv_a6 = wv + c0 * 4U;
+    uint32_t *wv_b6 = wv + d10 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a6;
+      uint32_t x1 = wv_a6[i] + wv_b6[i];
+      os[i] = x1;);
+    uint32_t *wv_a7 = wv + b0 * 4U;
+    uint32_t *wv_b7 = wv + c0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a7;
+      uint32_t x1 = wv_a7[i] ^ wv_b7[i];
+      os[i] = x1;);
+    uint32_t *r14 = wv_a7;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r14;
+      uint32_t x1 = r14[i];
+      uint32_t x10 = x1 >> 7U | x1 << 25U;
+      os[i] = x10;);
+    uint32_t *r15 = wv + 4U;
+    uint32_t *r21 = wv + 8U;
+    uint32_t *r31 = wv + 12U;
+    uint32_t *r110 = r15;
+    uint32_t x00 = r110[1U];
+    uint32_t x10 = r110[2U];
+    uint32_t x20 = r110[3U];
+    uint32_t x30 = r110[0U];
+    r110[0U] = x00;
+    r110[1U] = x10;
+    r110[2U] = x20;
+    r110[3U] = x30;
+    uint32_t *r111 = r21;
+    uint32_t x01 = r111[2U];
+    uint32_t x11 = r111[3U];
+    uint32_t x21 = r111[0U];
+    uint32_t x31 = r111[1U];
+    r111[0U] = x01;
+    r111[1U] = x11;
+    r111[2U] = x21;
+    r111[3U] = x31;
+    uint32_t *r112 = r31;
+    uint32_t x02 = r112[3U];
+    uint32_t x12 = r112[0U];
+    uint32_t x22 = r112[1U];
+    uint32_t x32 = r112[2U];
+    r112[0U] = x02;
+    r112[1U] = x12;
+    r112[2U] = x22;
+    r112[3U] = x32;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    uint32_t *wv_a = wv + a0 * 4U;
+    uint32_t *wv_b8 = wv + b * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a;
+      uint32_t x1 = wv_a[i] + wv_b8[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a;
+      uint32_t x1 = wv_a[i] + z[i];
+      os[i] = x1;);
+    uint32_t *wv_a8 = wv + d1 * 4U;
+    uint32_t *wv_b9 = wv + a0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a8;
+      uint32_t x1 = wv_a8[i] ^ wv_b9[i];
+      os[i] = x1;);
+    uint32_t *r16 = wv_a8;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r16;
+      uint32_t x1 = r16[i];
+      uint32_t x13 = x1 >> 16U | x1 << 16U;
+      os[i] = x13;);
+    uint32_t *wv_a9 = wv + c * 4U;
+    uint32_t *wv_b10 = wv + d1 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a9;
+      uint32_t x1 = wv_a9[i] + wv_b10[i];
+      os[i] = x1;);
+    uint32_t *wv_a10 = wv + b * 4U;
+    uint32_t *wv_b11 = wv + c * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a10;
+      uint32_t x1 = wv_a10[i] ^ wv_b11[i];
+      os[i] = x1;);
+    uint32_t *r17 = wv_a10;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r17;
+      uint32_t x1 = r17[i];
+      uint32_t x13 = x1 >> 12U | x1 << 20U;
+      os[i] = x13;);
+    uint32_t *wv_a11 = wv + a0 * 4U;
+    uint32_t *wv_b12 = wv + b * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a11;
+      uint32_t x1 = wv_a11[i] + wv_b12[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a11;
+      uint32_t x1 = wv_a11[i] + w[i];
+      os[i] = x1;);
+    uint32_t *wv_a12 = wv + d1 * 4U;
+    uint32_t *wv_b13 = wv + a0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a12;
+      uint32_t x1 = wv_a12[i] ^ wv_b13[i];
+      os[i] = x1;);
+    uint32_t *r18 = wv_a12;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r18;
+      uint32_t x1 = r18[i];
+      uint32_t x13 = x1 >> 8U | x1 << 24U;
+      os[i] = x13;);
+    uint32_t *wv_a13 = wv + c * 4U;
+    uint32_t *wv_b14 = wv + d1 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a13;
+      uint32_t x1 = wv_a13[i] + wv_b14[i];
+      os[i] = x1;);
+    uint32_t *wv_a14 = wv + b * 4U;
+    uint32_t *wv_b = wv + c * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a14;
+      uint32_t x1 = wv_a14[i] ^ wv_b[i];
+      os[i] = x1;);
+    uint32_t *r19 = wv_a14;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r19;
+      uint32_t x1 = r19[i];
+      uint32_t x13 = x1 >> 7U | x1 << 25U;
+      os[i] = x13;);
+    uint32_t *r113 = wv + 4U;
+    uint32_t *r2 = wv + 8U;
+    uint32_t *r3 = wv + 12U;
+    uint32_t *r11 = r113;
+    uint32_t x03 = r11[3U];
+    uint32_t x13 = r11[0U];
+    uint32_t x23 = r11[1U];
+    uint32_t x33 = r11[2U];
+    r11[0U] = x03;
+    r11[1U] = x13;
+    r11[2U] = x23;
+    r11[3U] = x33;
+    uint32_t *r114 = r2;
+    uint32_t x04 = r114[2U];
+    uint32_t x14 = r114[3U];
+    uint32_t x24 = r114[0U];
+    uint32_t x34 = r114[1U];
+    r114[0U] = x04;
+    r114[1U] = x14;
+    r114[2U] = x24;
+    r114[3U] = x34;
+    uint32_t *r115 = r3;
+    uint32_t x0 = r115[1U];
+    uint32_t x1 = r115[2U];
+    uint32_t x2 = r115[3U];
+    uint32_t x3 = r115[0U];
+    r115[0U] = x0;
+    r115[1U] = x1;
+    r115[2U] = x2;
+    r115[3U] = x3;);
+  uint32_t *s0 = hash;
+  uint32_t *s1 = hash + 4U;
+  uint32_t *r0 = wv;
+  uint32_t *r1 = wv + 4U;
+  uint32_t *r2 = wv + 8U;
+  uint32_t *r3 = wv + 12U;
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = s0;
+    uint32_t x = s0[i] ^ r0[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = s0;
+    uint32_t x = s0[i] ^ r2[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = s1;
+    uint32_t x = s1[i] ^ r1[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = s1;
+    uint32_t x = s1[i] ^ r3[i];
+    os[i] = x;);
+}
+
+void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn)
+{
+  uint32_t *r0 = hash;
+  uint32_t *r1 = hash + 4U;
+  uint32_t *r2 = hash + 8U;
+  uint32_t *r3 = hash + 12U;
+  uint32_t iv0 = Hacl_Hash_Blake2s_ivTable_S[0U];
+  uint32_t iv1 = Hacl_Hash_Blake2s_ivTable_S[1U];
+  uint32_t iv2 = Hacl_Hash_Blake2s_ivTable_S[2U];
+  uint32_t iv3 = Hacl_Hash_Blake2s_ivTable_S[3U];
+  uint32_t iv4 = Hacl_Hash_Blake2s_ivTable_S[4U];
+  uint32_t iv5 = Hacl_Hash_Blake2s_ivTable_S[5U];
+  uint32_t iv6 = Hacl_Hash_Blake2s_ivTable_S[6U];
+  uint32_t iv7 = Hacl_Hash_Blake2s_ivTable_S[7U];
+  r2[0U] = iv0;
+  r2[1U] = iv1;
+  r2[2U] = iv2;
+  r2[3U] = iv3;
+  r3[0U] = iv4;
+  r3[1U] = iv5;
+  r3[2U] = iv6;
+  r3[3U] = iv7;
+  uint32_t kk_shift_8 = kk << 8U;
+  uint32_t iv0_ = iv0 ^ (0x01010000U ^ (kk_shift_8 ^ nn));
+  r0[0U] = iv0_;
+  r0[1U] = iv1;
+  r0[2U] = iv2;
+  r0[3U] = iv3;
+  r1[0U] = iv4;
+  r1[1U] = iv5;
+  r1[2U] = iv6;
+  r1[3U] = iv7;
+}
+
+static void update_key(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, uint32_t ll)
+{
+  uint64_t lb = (uint64_t)64U;
+  uint8_t b[64U] = { 0U };
+  memcpy(b, k, kk * sizeof (uint8_t));
+  if (ll == 0U)
+  {
+    update_block(wv, hash, true, lb, b);
+  }
+  else
+  {
+    update_block(wv, hash, false, lb, b);
+  }
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
+}
+
+void
+Hacl_Hash_Blake2s_update_multi(
+  uint32_t len,
+  uint32_t *wv,
+  uint32_t *hash,
+  uint64_t prev,
+  uint8_t *blocks,
+  uint32_t nb
+)
+{
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
+  {
+    uint64_t totlen = prev + (uint64_t)((i + 1U) * 64U);
+    uint8_t *b = blocks + i * 64U;
+    update_block(wv, hash, false, totlen, b);
+  }
+}
+
+void
+Hacl_Hash_Blake2s_update_last(
+  uint32_t len,
+  uint32_t *wv,
+  uint32_t *hash,
+  uint64_t prev,
+  uint32_t rem,
+  uint8_t *d
+)
+{
+  uint8_t b[64U] = { 0U };
+  uint8_t *last = d + len - rem;
+  memcpy(b, last, rem * sizeof (uint8_t));
+  uint64_t totlen = prev + (uint64_t)len;
+  update_block(wv, hash, true, totlen, b);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
+}
+
+static void
+update_blocks(uint32_t len, uint32_t *wv, uint32_t *hash, uint64_t prev, uint8_t *blocks)
+{
+  uint32_t nb0 = len / 64U;
+  uint32_t rem0 = len % 64U;
+  uint32_t nb;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    nb = nb0 - 1U;
+  }
+  else
+  {
+    nb = nb0;
+  }
+  uint32_t rem;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    rem = 64U;
+  }
+  else
+  {
+    rem = rem0;
+  }
+  Hacl_Hash_Blake2s_update_multi(len, wv, hash, prev, blocks, nb);
+  Hacl_Hash_Blake2s_update_last(len, wv, hash, prev, rem, blocks);
+}
+
+static inline void
+update(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d)
+{
+  uint64_t lb = (uint64_t)64U;
+  if (kk > 0U)
+  {
+    update_key(wv, hash, kk, k, ll);
+    if (!(ll == 0U))
+    {
+      update_blocks(ll, wv, hash, lb, d);
+      return;
+    }
+    return;
+  }
+  update_blocks(ll, wv, hash, (uint64_t)0U, d);
+}
+
+void Hacl_Hash_Blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash)
+{
+  uint8_t b[32U] = { 0U };
+  uint8_t *first = b;
+  uint8_t *second = b + 16U;
+  uint32_t *row0 = hash;
+  uint32_t *row1 = hash + 4U;
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(first + i * 4U, row0[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(second + i * 4U, row1[i]););
+  uint8_t *final = b;
+  memcpy(output, final, nn * sizeof (uint8_t));
+  Lib_Memzero0_memzero(b, 32U, uint8_t);
+}
+
+/**
+  State allocation function when there is no key
+*/
+Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
+  uint32_t *b = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
+  Hacl_Hash_Blake2s_block_state_t block_state = { .fst = wv, .snd = b };
+  Hacl_Hash_Blake2s_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  Hacl_Hash_Blake2s_state_t
+  *p = (Hacl_Hash_Blake2s_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2s_state_t));
+  p[0U] = s;
+  Hacl_Hash_Blake2s_init(block_state.snd, 0U, 32U);
+  return p;
+}
+
+/**
+  Re-initialization function when there is no key
+*/
+void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *state)
+{
+  Hacl_Hash_Blake2s_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state;
+  Hacl_Hash_Blake2s_init(block_state.snd, 0U, 32U);
+  Hacl_Hash_Blake2s_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
+}
+
+/**
+  Update function when there is no key; 0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint32_t chunk_len)
+{
+  Hacl_Hash_Blake2s_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
+  {
+    sz = 64U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)64U);
+  }
+  if (chunk_len <= 64U - sz)
+  {
+    Hacl_Hash_Blake2s_state_t s1 = *state;
+    Hacl_Hash_Blake2s_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_Hash_Blake2s_state_t s1 = *state;
+    Hacl_Hash_Blake2s_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      uint32_t *wv = block_state1.fst;
+      uint32_t *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2s_update_multi(64U, wv, hash, prevlen, buf, nb);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 64U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    uint32_t *wv = block_state1.fst;
+    uint32_t *hash = block_state1.snd;
+    uint32_t nb = data1_len / 64U;
+    Hacl_Hash_Blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 64U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Hash_Blake2s_state_t s1 = *state;
+    Hacl_Hash_Blake2s_block_state_t block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 64U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2
+        }
+      );
+    Hacl_Hash_Blake2s_state_t s10 = *state;
+    Hacl_Hash_Blake2s_block_state_t block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      uint32_t *wv = block_state1.fst;
+      uint32_t *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2s_update_multi(64U, wv, hash, prevlen, buf, nb);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 64U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    uint32_t *wv = block_state1.fst;
+    uint32_t *hash = block_state1.snd;
+    uint32_t nb = data1_len / 64U;
+    Hacl_Hash_Blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
+
+/**
+  Finish function when there is no key
+*/
+void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output)
+{
+  Hacl_Hash_Blake2s_state_t scrut = *state;
+  Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint32_t r;
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
+  {
+    r = 64U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)64U);
+  }
+  uint8_t *buf_1 = buf_;
+  uint32_t wv0[16U] = { 0U };
+  uint32_t b[16U] = { 0U };
+  Hacl_Hash_Blake2s_block_state_t tmp_block_state = { .fst = wv0, .snd = b };
+  uint32_t *src_b = block_state.snd;
+  uint32_t *dst_b = tmp_block_state.snd;
+  memcpy(dst_b, src_b, 16U * sizeof (uint32_t));
+  uint64_t prev_len = total_len - (uint64_t)r;
+  uint32_t ite;
+  if (r % 64U == 0U && r > 0U)
+  {
+    ite = 64U;
+  }
+  else
+  {
+    ite = r % 64U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite;
+  uint8_t *buf_multi = buf_1;
+  uint32_t *wv1 = tmp_block_state.fst;
+  uint32_t *hash0 = tmp_block_state.snd;
+  uint32_t nb = 0U;
+  Hacl_Hash_Blake2s_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb);
+  uint64_t prev_len_last = total_len - (uint64_t)r;
+  uint32_t *wv = tmp_block_state.fst;
+  uint32_t *hash = tmp_block_state.snd;
+  Hacl_Hash_Blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last);
+  Hacl_Hash_Blake2s_finish(32U, output, tmp_block_state.snd);
+}
+
+/**
+  Free state function when there is no key
+*/
+void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state)
+{
+  Hacl_Hash_Blake2s_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state;
+  uint32_t *wv = block_state.fst;
+  uint32_t *b = block_state.snd;
+  KRML_HOST_FREE(wv);
+  KRML_HOST_FREE(b);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
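
Taken together, `malloc`, `update`, `digest`, and `free` above form the keyless streaming interface. A minimal usage sketch follows, assuming the declarations are picked up from the renamed public header `Hacl_Hash_Blake2s.h`:

#include <stdint.h>
#include <string.h>
#include "Hacl_Hash_Blake2s.h"  /* assumed public header name for these declarations */

static void example_blake2s_streaming(void)
{
  const char *msg = "hello, streaming BLAKE2s";
  uint8_t digest[32U] = { 0U };
  /* Allocate a fresh keyless state; the digest length is fixed to 32 bytes. */
  Hacl_Hash_Blake2s_state_t *st = Hacl_Hash_Blake2s_malloc();
  /* Data may be fed in any number of update calls; 0 = success. */
  Hacl_Streaming_Types_error_code
  err = Hacl_Hash_Blake2s_update(st, (uint8_t *)msg, (uint32_t)strlen(msg));
  if (err == Hacl_Streaming_Types_Success)
  {
    /* digest finalizes a copy of the state, so more data could still be appended. */
    Hacl_Hash_Blake2s_digest(st, digest);
  }
  Hacl_Hash_Blake2s_free(st);
}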
+
+/**
+Write the BLAKE2s digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the digest to be generated, with 1 <= `output_len` <= 32.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
+void
+Hacl_Hash_Blake2s_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
+)
+{
+  uint32_t b[16U] = { 0U };
+  uint32_t b1[16U] = { 0U };
+  Hacl_Hash_Blake2s_init(b, key_len, output_len);
+  update(b1, b, key_len, key, input_len, input);
+  Hacl_Hash_Blake2s_finish(output_len, output, b);
+  Lib_Memzero0_memzero(b1, 16U, uint32_t);
+  Lib_Memzero0_memzero(b, 16U, uint32_t);
+}
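
As a counterpart to the streaming interface, the one-shot call documented above can be exercised as in the sketch below; the `Hacl_Hash_Blake2s.h` header name is again an assumption.

#include <stdint.h>
#include <string.h>
#include "Hacl_Hash_Blake2s.h"  /* assumed public header name */

static void example_blake2s_one_shot(void)
{
  const char *msg = "abc";
  const char *key = "my secret key";
  uint8_t keyed[32U] = { 0U };
  uint8_t unkeyed[16U] = { 0U };
  /* Keyed, full-length digest: 1 <= output_len <= 32. */
  Hacl_Hash_Blake2s_hash_with_key(keyed, 32U,
    (uint8_t *)msg, (uint32_t)strlen(msg),
    (uint8_t *)key, (uint32_t)strlen(key));
  /* Unkeyed, truncated 16-byte digest: with key_len = 0 the key pointer is never read. */
  Hacl_Hash_Blake2s_hash_with_key(unkeyed, 16U, (uint8_t *)msg, (uint32_t)strlen(msg), NULL, 0U);
}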
+
diff --git a/src/Hacl_Hash_Blake2s_128.c b/src/Hacl_Hash_Blake2s_128.c
deleted file mode 100644
index 86c4f030..00000000
--- a/src/Hacl_Hash_Blake2s_128.c
+++ /dev/null
@@ -1,491 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Hash_Blake2s_128.h"
-
-#include "internal/Hacl_Impl_Blake2_Constants.h"
-#include "internal/Hacl_Hash_Blake2.h"
-#include "lib_memzero0.h"
-
-static inline void
-blake2s_update_block(
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  bool flag,
-  uint64_t totlen,
-  uint8_t *d
-)
-{
-  uint32_t m_w[16U] = { 0U };
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint32_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)4U;
-    uint32_t u = load32_le(bj);
-    uint32_t r = u;
-    uint32_t x = r;
-    os[i] = x;);
-  Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_zero;
-  uint32_t wv_14;
-  if (flag)
-  {
-    wv_14 = (uint32_t)0xFFFFFFFFU;
-  }
-  else
-  {
-    wv_14 = (uint32_t)0U;
-  }
-  uint32_t wv_15 = (uint32_t)0U;
-  mask =
-    Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)totlen,
-      (uint32_t)(totlen >> (uint32_t)32U),
-      wv_14,
-      wv_15);
-  memcpy(wv, hash, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  Lib_IntVector_Intrinsics_vec128 *wv3 = wv + (uint32_t)3U;
-  wv3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv3[0U], mask);
-  KRML_MAYBE_FOR10(i,
-    (uint32_t)0U,
-    (uint32_t)10U,
-    (uint32_t)1U,
-    uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U;
-    KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 m_st[4U] KRML_POST_ALIGN(16) = { 0U };
-    Lib_IntVector_Intrinsics_vec128 *r0 = m_st;
-    Lib_IntVector_Intrinsics_vec128 *r1 = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *r20 = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *r30 = m_st + (uint32_t)3U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
-    r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]);
-    r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]);
-    r20[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]);
-    r30[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]);
-    Lib_IntVector_Intrinsics_vec128 *x = m_st;
-    Lib_IntVector_Intrinsics_vec128 *y = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *z = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *w = m_st + (uint32_t)3U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec128 *wv_a0 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b0 = wv + b0 * (uint32_t)1U;
-    wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], wv_b0[0U]);
-    wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], x[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a1 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b1 = wv + a * (uint32_t)1U;
-    wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a1[0U], wv_b1[0U]);
-    wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a1[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a2 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b2 = wv + d10 * (uint32_t)1U;
-    wv_a2[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a2[0U], wv_b2[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a3 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b3 = wv + c0 * (uint32_t)1U;
-    wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a3[0U], wv_b3[0U]);
-    wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a3[0U], (uint32_t)12U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a4 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b4 = wv + b0 * (uint32_t)1U;
-    wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], wv_b4[0U]);
-    wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], y[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a5 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b5 = wv + a * (uint32_t)1U;
-    wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a5[0U], wv_b5[0U]);
-    wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a5[0U], (uint32_t)8U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a6 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b6 = wv + d10 * (uint32_t)1U;
-    wv_a6[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a6[0U], wv_b6[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a7 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b7 = wv + c0 * (uint32_t)1U;
-    wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a7[0U], wv_b7[0U]);
-    wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a7[0U], (uint32_t)7U);
-    Lib_IntVector_Intrinsics_vec128 *r10 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *r21 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *r31 = wv + (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec128 v00 = r10[0U];
-    Lib_IntVector_Intrinsics_vec128
-    v1 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v00, (uint32_t)1U);
-    r10[0U] = v1;
-    Lib_IntVector_Intrinsics_vec128 v01 = r21[0U];
-    Lib_IntVector_Intrinsics_vec128
-    v10 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v01, (uint32_t)2U);
-    r21[0U] = v10;
-    Lib_IntVector_Intrinsics_vec128 v02 = r31[0U];
-    Lib_IntVector_Intrinsics_vec128
-    v11 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v02, (uint32_t)3U);
-    r31[0U] = v11;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec128 *wv_a = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b8 = wv + b * (uint32_t)1U;
-    wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], wv_b8[0U]);
-    wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], z[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a8 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b9 = wv + a0 * (uint32_t)1U;
-    wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a8[0U], wv_b9[0U]);
-    wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a8[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a9 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b10 = wv + d1 * (uint32_t)1U;
-    wv_a9[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a9[0U], wv_b10[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a10 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b11 = wv + c * (uint32_t)1U;
-    wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a10[0U], wv_b11[0U]);
-    wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a10[0U], (uint32_t)12U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a11 = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b12 = wv + b * (uint32_t)1U;
-    wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], wv_b12[0U]);
-    wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], w[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a12 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b13 = wv + a0 * (uint32_t)1U;
-    wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a12[0U], wv_b13[0U]);
-    wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a12[0U], (uint32_t)8U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a13 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b14 = wv + d1 * (uint32_t)1U;
-    wv_a13[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a13[0U], wv_b14[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a14 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b = wv + c * (uint32_t)1U;
-    wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a14[0U], wv_b[0U]);
-    wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a14[0U], (uint32_t)7U);
-    Lib_IntVector_Intrinsics_vec128 *r11 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *r2 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *r3 = wv + (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec128 v0 = r11[0U];
-    Lib_IntVector_Intrinsics_vec128
-    v12 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v0, (uint32_t)3U);
-    r11[0U] = v12;
-    Lib_IntVector_Intrinsics_vec128 v03 = r2[0U];
-    Lib_IntVector_Intrinsics_vec128
-    v13 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v03, (uint32_t)2U);
-    r2[0U] = v13;
-    Lib_IntVector_Intrinsics_vec128 v04 = r3[0U];
-    Lib_IntVector_Intrinsics_vec128
-    v14 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v04, (uint32_t)1U);
-    r3[0U] = v14;);
-  Lib_IntVector_Intrinsics_vec128 *s0 = hash;
-  Lib_IntVector_Intrinsics_vec128 *s1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r0 = wv;
-  Lib_IntVector_Intrinsics_vec128 *r1 = wv + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = wv + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = wv + (uint32_t)3U;
-  s0[0U] = Lib_IntVector_Intrinsics_vec128_xor(s0[0U], r0[0U]);
-  s0[0U] = Lib_IntVector_Intrinsics_vec128_xor(s0[0U], r2[0U]);
-  s1[0U] = Lib_IntVector_Intrinsics_vec128_xor(s1[0U], r1[0U]);
-  s1[0U] = Lib_IntVector_Intrinsics_vec128_xor(s1[0U], r3[0U]);
-}
-
-void
-Hacl_Blake2s_128_blake2s_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn)
-{
-  Lib_IntVector_Intrinsics_vec128 *r0 = hash;
-  Lib_IntVector_Intrinsics_vec128 *r1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = hash + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = hash + (uint32_t)3U;
-  uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U];
-  uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U];
-  uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U];
-  uint32_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_S[3U];
-  uint32_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_S[4U];
-  uint32_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_S[5U];
-  uint32_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_S[6U];
-  uint32_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_S[7U];
-  r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3);
-  r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
-  uint32_t kk_shift_8 = kk << (uint32_t)8U;
-  uint32_t iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ nn));
-  r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1, iv2, iv3);
-  r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
-}
-
-void
-Hacl_Blake2s_128_blake2s_update_key(
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-)
-{
-  uint64_t lb = (uint64_t)(uint32_t)64U;
-  uint8_t b[64U] = { 0U };
-  memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
-  {
-    blake2s_update_block(wv, hash, true, lb, b);
-  }
-  else
-  {
-    blake2s_update_block(wv, hash, false, lb, b);
-  }
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
-}
-
-void
-Hacl_Blake2s_128_blake2s_update_multi(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  uint64_t prev,
-  uint8_t *blocks,
-  uint32_t nb
-)
-{
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
-  {
-    uint64_t totlen = prev + (uint64_t)((i + (uint32_t)1U) * (uint32_t)64U);
-    uint8_t *b = blocks + i * (uint32_t)64U;
-    blake2s_update_block(wv, hash, false, totlen, b);
-  }
-}
-
-void
-Hacl_Blake2s_128_blake2s_update_last(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  uint64_t prev,
-  uint32_t rem,
-  uint8_t *d
-)
-{
-  uint8_t b[64U] = { 0U };
-  uint8_t *last = d + len - rem;
-  memcpy(b, last, rem * sizeof (uint8_t));
-  uint64_t totlen = prev + (uint64_t)len;
-  blake2s_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
-}
-
-static inline void
-blake2s_update_blocks(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  uint64_t prev,
-  uint8_t *blocks
-)
-{
-  uint32_t nb0 = len / (uint32_t)64U;
-  uint32_t rem0 = len % (uint32_t)64U;
-  K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
-  {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)64U;
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
-  }
-  else
-  {
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb0, .snd = rem0 });
-  }
-  uint32_t nb = scrut.fst;
-  uint32_t rem = scrut.snd;
-  Hacl_Blake2s_128_blake2s_update_multi(len, wv, hash, prev, blocks, nb);
-  Hacl_Blake2s_128_blake2s_update_last(len, wv, hash, prev, rem, blocks);
-}
-
-static inline void
-blake2s_update(
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll,
-  uint8_t *d
-)
-{
-  uint64_t lb = (uint64_t)(uint32_t)64U;
-  if (kk > (uint32_t)0U)
-  {
-    Hacl_Blake2s_128_blake2s_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
-    {
-      blake2s_update_blocks(ll, wv, hash, lb, d);
-      return;
-    }
-    return;
-  }
-  blake2s_update_blocks(ll, wv, hash, (uint64_t)(uint32_t)0U, d);
-}
-
-void
-Hacl_Blake2s_128_blake2s_finish(
-  uint32_t nn,
-  uint8_t *output,
-  Lib_IntVector_Intrinsics_vec128 *hash
-)
-{
-  uint8_t b[32U] = { 0U };
-  uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)16U;
-  Lib_IntVector_Intrinsics_vec128 *row0 = hash;
-  Lib_IntVector_Intrinsics_vec128 *row1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128_store32_le(first, row0[0U]);
-  Lib_IntVector_Intrinsics_vec128_store32_le(second, row1[0U]);
-  uint8_t *final = b;
-  memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)32U, uint8_t);
-}
-
-/**
-Write the BLAKE2s digest of message `d` using key `k` into `output`.
-
-@param nn Length of to-be-generated digest with 1 <= `nn` <= 32.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2s_128_blake2s(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-)
-{
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U };
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b1[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_init(b, kk, nn);
-  blake2s_update(b1, b, kk, k, ll, d);
-  Hacl_Blake2s_128_blake2s_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)4U, Lib_IntVector_Intrinsics_vec128);
-  Lib_Memzero0_memzero(b, (uint32_t)4U, Lib_IntVector_Intrinsics_vec128);
-}
-
-void
-Hacl_Blake2s_128_store_state128s_to_state32(
-  uint32_t *st32,
-  Lib_IntVector_Intrinsics_vec128 *st
-)
-{
-  Lib_IntVector_Intrinsics_vec128 *r0 = st;
-  Lib_IntVector_Intrinsics_vec128 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = st + (uint32_t)3U;
-  uint32_t *b0 = st32;
-  uint32_t *b1 = st32 + (uint32_t)4U;
-  uint32_t *b2 = st32 + (uint32_t)8U;
-  uint32_t *b3 = st32 + (uint32_t)12U;
-  uint8_t b8[16U] = { 0U };
-  Lib_IntVector_Intrinsics_vec128_store32_le(b8, r0[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = b0;
-    uint8_t *bj = b8 + i * (uint32_t)4U;
-    uint32_t u = load32_le(bj);
-    uint32_t r = u;
-    uint32_t x = r;
-    os[i] = x;);
-  uint8_t b80[16U] = { 0U };
-  Lib_IntVector_Intrinsics_vec128_store32_le(b80, r1[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = b1;
-    uint8_t *bj = b80 + i * (uint32_t)4U;
-    uint32_t u = load32_le(bj);
-    uint32_t r = u;
-    uint32_t x = r;
-    os[i] = x;);
-  uint8_t b81[16U] = { 0U };
-  Lib_IntVector_Intrinsics_vec128_store32_le(b81, r2[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = b2;
-    uint8_t *bj = b81 + i * (uint32_t)4U;
-    uint32_t u = load32_le(bj);
-    uint32_t r = u;
-    uint32_t x = r;
-    os[i] = x;);
-  uint8_t b82[16U] = { 0U };
-  Lib_IntVector_Intrinsics_vec128_store32_le(b82, r3[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = b3;
-    uint8_t *bj = b82 + i * (uint32_t)4U;
-    uint32_t u = load32_le(bj);
-    uint32_t r = u;
-    uint32_t x = r;
-    os[i] = x;);
-}
-
-void
-Hacl_Blake2s_128_load_state128s_from_state32(
-  Lib_IntVector_Intrinsics_vec128 *st,
-  uint32_t *st32
-)
-{
-  Lib_IntVector_Intrinsics_vec128 *r0 = st;
-  Lib_IntVector_Intrinsics_vec128 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = st + (uint32_t)3U;
-  uint32_t *b0 = st32;
-  uint32_t *b1 = st32 + (uint32_t)4U;
-  uint32_t *b2 = st32 + (uint32_t)8U;
-  uint32_t *b3 = st32 + (uint32_t)12U;
-  r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b0[0U], b0[1U], b0[2U], b0[3U]);
-  r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b1[0U], b1[1U], b1[2U], b1[3U]);
-  r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b2[0U], b2[1U], b2[2U], b2[3U]);
-  r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b3[0U], b3[1U], b3[2U], b3[3U]);
-}
-
-Lib_IntVector_Intrinsics_vec128 *Hacl_Blake2s_128_blake2s_malloc(void)
-{
-  Lib_IntVector_Intrinsics_vec128
-  *buf =
-    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U);
-  memset(buf, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  return buf;
-}
-
diff --git a/src/Hacl_Hash_Blake2s_Simd128.c b/src/Hacl_Hash_Blake2s_Simd128.c
new file mode 100644
index 00000000..73f0cccb
--- /dev/null
+++ b/src/Hacl_Hash_Blake2s_Simd128.c
@@ -0,0 +1,794 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_Hash_Blake2s_Simd128.h"
+
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "lib_memzero0.h"
+
+static inline void
+update_block(
+  Lib_IntVector_Intrinsics_vec128 *wv,
+  Lib_IntVector_Intrinsics_vec128 *hash,
+  bool flag,
+  uint64_t totlen,
+  uint8_t *d
+)
+{
+  uint32_t m_w[16U] = { 0U };
+  KRML_MAYBE_FOR16(i,
+    0U,
+    16U,
+    1U,
+    uint32_t *os = m_w;
+    uint8_t *bj = d + i * 4U;
+    uint32_t u = load32_le(bj);
+    uint32_t r = u;
+    uint32_t x = r;
+    os[i] = x;);
+  Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_zero;
+  uint32_t wv_14;
+  if (flag)
+  {
+    wv_14 = 0xFFFFFFFFU;
+  }
+  else
+  {
+    wv_14 = 0U;
+  }
+  uint32_t wv_15 = 0U;
+  mask =
+    Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)totlen,
+      (uint32_t)(totlen >> 32U),
+      wv_14,
+      wv_15);
+  memcpy(wv, hash, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Lib_IntVector_Intrinsics_vec128 *wv3 = wv + 3U;
+  wv3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv3[0U], mask);
+  KRML_MAYBE_FOR10(i,
+    0U,
+    10U,
+    1U,
+    uint32_t start_idx = i % 10U * 16U;
+    KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 m_st[4U] KRML_POST_ALIGN(16) = { 0U };
+    Lib_IntVector_Intrinsics_vec128 *r0 = m_st;
+    Lib_IntVector_Intrinsics_vec128 *r1 = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec128 *r20 = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec128 *r30 = m_st + 3U;
+    uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U];
+    r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]);
+    r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]);
+    r20[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]);
+    r30[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]);
+    Lib_IntVector_Intrinsics_vec128 *x = m_st;
+    Lib_IntVector_Intrinsics_vec128 *y = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec128 *z = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec128 *w = m_st + 3U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a0 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b0 = wv + b0 * 1U;
+    wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], wv_b0[0U]);
+    wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], x[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a1 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b1 = wv + a * 1U;
+    wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a1[0U], wv_b1[0U]);
+    wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a1[0U], 16U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a2 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b2 = wv + d10 * 1U;
+    wv_a2[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a2[0U], wv_b2[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a3 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b3 = wv + c0 * 1U;
+    wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a3[0U], wv_b3[0U]);
+    wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a3[0U], 12U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a4 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b4 = wv + b0 * 1U;
+    wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], wv_b4[0U]);
+    wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], y[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a5 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b5 = wv + a * 1U;
+    wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a5[0U], wv_b5[0U]);
+    wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a5[0U], 8U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a6 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b6 = wv + d10 * 1U;
+    wv_a6[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a6[0U], wv_b6[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a7 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b7 = wv + c0 * 1U;
+    wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a7[0U], wv_b7[0U]);
+    wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a7[0U], 7U);
+    Lib_IntVector_Intrinsics_vec128 *r10 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec128 *r21 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec128 *r31 = wv + 3U;
+    Lib_IntVector_Intrinsics_vec128 v00 = r10[0U];
+    Lib_IntVector_Intrinsics_vec128
+    v1 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v00, 1U);
+    r10[0U] = v1;
+    Lib_IntVector_Intrinsics_vec128 v01 = r21[0U];
+    Lib_IntVector_Intrinsics_vec128
+    v10 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v01, 2U);
+    r21[0U] = v10;
+    Lib_IntVector_Intrinsics_vec128 v02 = r31[0U];
+    Lib_IntVector_Intrinsics_vec128
+    v11 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v02, 3U);
+    r31[0U] = v11;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b8 = wv + b * 1U;
+    wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], wv_b8[0U]);
+    wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], z[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a8 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b9 = wv + a0 * 1U;
+    wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a8[0U], wv_b9[0U]);
+    wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a8[0U], 16U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a9 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b10 = wv + d1 * 1U;
+    wv_a9[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a9[0U], wv_b10[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a10 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b11 = wv + c * 1U;
+    wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a10[0U], wv_b11[0U]);
+    wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a10[0U], 12U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a11 = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b12 = wv + b * 1U;
+    wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], wv_b12[0U]);
+    wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], w[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a12 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b13 = wv + a0 * 1U;
+    wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a12[0U], wv_b13[0U]);
+    wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a12[0U], 8U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a13 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b14 = wv + d1 * 1U;
+    wv_a13[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a13[0U], wv_b14[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a14 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b = wv + c * 1U;
+    wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a14[0U], wv_b[0U]);
+    wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a14[0U], 7U);
+    Lib_IntVector_Intrinsics_vec128 *r11 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec128 *r2 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec128 *r3 = wv + 3U;
+    Lib_IntVector_Intrinsics_vec128 v0 = r11[0U];
+    Lib_IntVector_Intrinsics_vec128
+    v12 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v0, 3U);
+    r11[0U] = v12;
+    Lib_IntVector_Intrinsics_vec128 v03 = r2[0U];
+    Lib_IntVector_Intrinsics_vec128
+    v13 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v03, 2U);
+    r2[0U] = v13;
+    Lib_IntVector_Intrinsics_vec128 v04 = r3[0U];
+    Lib_IntVector_Intrinsics_vec128
+    v14 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v04, 1U);
+    r3[0U] = v14;);
+  Lib_IntVector_Intrinsics_vec128 *s0 = hash;
+  Lib_IntVector_Intrinsics_vec128 *s1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r0 = wv;
+  Lib_IntVector_Intrinsics_vec128 *r1 = wv + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = wv + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = wv + 3U;
+  s0[0U] = Lib_IntVector_Intrinsics_vec128_xor(s0[0U], r0[0U]);
+  s0[0U] = Lib_IntVector_Intrinsics_vec128_xor(s0[0U], r2[0U]);
+  s1[0U] = Lib_IntVector_Intrinsics_vec128_xor(s1[0U], r1[0U]);
+  s1[0U] = Lib_IntVector_Intrinsics_vec128_xor(s1[0U], r3[0U]);
+}
+
+void
+Hacl_Hash_Blake2s_Simd128_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn)
+{
+  Lib_IntVector_Intrinsics_vec128 *r0 = hash;
+  Lib_IntVector_Intrinsics_vec128 *r1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = hash + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = hash + 3U;
+  uint32_t iv0 = Hacl_Hash_Blake2s_ivTable_S[0U];
+  uint32_t iv1 = Hacl_Hash_Blake2s_ivTable_S[1U];
+  uint32_t iv2 = Hacl_Hash_Blake2s_ivTable_S[2U];
+  uint32_t iv3 = Hacl_Hash_Blake2s_ivTable_S[3U];
+  uint32_t iv4 = Hacl_Hash_Blake2s_ivTable_S[4U];
+  uint32_t iv5 = Hacl_Hash_Blake2s_ivTable_S[5U];
+  uint32_t iv6 = Hacl_Hash_Blake2s_ivTable_S[6U];
+  uint32_t iv7 = Hacl_Hash_Blake2s_ivTable_S[7U];
+  r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3);
+  r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
+  uint32_t kk_shift_8 = kk << 8U;
+  uint32_t iv0_ = iv0 ^ (0x01010000U ^ (kk_shift_8 ^ nn));
+  r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1, iv2, iv3);
+  r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
+}
+
+static void
+update_key(
+  Lib_IntVector_Intrinsics_vec128 *wv,
+  Lib_IntVector_Intrinsics_vec128 *hash,
+  uint32_t kk,
+  uint8_t *k,
+  uint32_t ll
+)
+{
+  uint64_t lb = (uint64_t)64U;
+  uint8_t b[64U] = { 0U };
+  memcpy(b, k, kk * sizeof (uint8_t));
+  if (ll == 0U)
+  {
+    update_block(wv, hash, true, lb, b);
+  }
+  else
+  {
+    update_block(wv, hash, false, lb, b);
+  }
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
+}
+
+void
+Hacl_Hash_Blake2s_Simd128_update_multi(
+  uint32_t len,
+  Lib_IntVector_Intrinsics_vec128 *wv,
+  Lib_IntVector_Intrinsics_vec128 *hash,
+  uint64_t prev,
+  uint8_t *blocks,
+  uint32_t nb
+)
+{
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
+  {
+    uint64_t totlen = prev + (uint64_t)((i + 1U) * 64U);
+    uint8_t *b = blocks + i * 64U;
+    update_block(wv, hash, false, totlen, b);
+  }
+}
+
+void
+Hacl_Hash_Blake2s_Simd128_update_last(
+  uint32_t len,
+  Lib_IntVector_Intrinsics_vec128 *wv,
+  Lib_IntVector_Intrinsics_vec128 *hash,
+  uint64_t prev,
+  uint32_t rem,
+  uint8_t *d
+)
+{
+  uint8_t b[64U] = { 0U };
+  uint8_t *last = d + len - rem;
+  memcpy(b, last, rem * sizeof (uint8_t));
+  uint64_t totlen = prev + (uint64_t)len;
+  update_block(wv, hash, true, totlen, b);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
+}
+
+static inline void
+update_blocks(
+  uint32_t len,
+  Lib_IntVector_Intrinsics_vec128 *wv,
+  Lib_IntVector_Intrinsics_vec128 *hash,
+  uint64_t prev,
+  uint8_t *blocks
+)
+{
+  uint32_t nb0 = len / 64U;
+  uint32_t rem0 = len % 64U;
+  uint32_t nb;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    nb = nb0 - 1U;
+  }
+  else
+  {
+    nb = nb0;
+  }
+  uint32_t rem;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    rem = 64U;
+  }
+  else
+  {
+    rem = rem0;
+  }
+  Hacl_Hash_Blake2s_Simd128_update_multi(len, wv, hash, prev, blocks, nb);
+  Hacl_Hash_Blake2s_Simd128_update_last(len, wv, hash, prev, rem, blocks);
+}
+
+static inline void
+update(
+  Lib_IntVector_Intrinsics_vec128 *wv,
+  Lib_IntVector_Intrinsics_vec128 *hash,
+  uint32_t kk,
+  uint8_t *k,
+  uint32_t ll,
+  uint8_t *d
+)
+{
+  uint64_t lb = (uint64_t)64U;
+  if (kk > 0U)
+  {
+    update_key(wv, hash, kk, k, ll);
+    if (!(ll == 0U))
+    {
+      update_blocks(ll, wv, hash, lb, d);
+      return;
+    }
+    return;
+  }
+  update_blocks(ll, wv, hash, (uint64_t)0U, d);
+}
+
+void
+Hacl_Hash_Blake2s_Simd128_finish(
+  uint32_t nn,
+  uint8_t *output,
+  Lib_IntVector_Intrinsics_vec128 *hash
+)
+{
+  uint8_t b[32U] = { 0U };
+  uint8_t *first = b;
+  uint8_t *second = b + 16U;
+  Lib_IntVector_Intrinsics_vec128 *row0 = hash;
+  Lib_IntVector_Intrinsics_vec128 *row1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec128_store32_le(first, row0[0U]);
+  Lib_IntVector_Intrinsics_vec128_store32_le(second, row1[0U]);
+  uint8_t *final = b;
+  memcpy(output, final, nn * sizeof (uint8_t));
+  Lib_Memzero0_memzero(b, 32U, uint8_t);
+}
+
+void
+Hacl_Hash_Blake2s_Simd128_store_state128s_to_state32(
+  uint32_t *st32,
+  Lib_IntVector_Intrinsics_vec128 *st
+)
+{
+  Lib_IntVector_Intrinsics_vec128 *r0 = st;
+  Lib_IntVector_Intrinsics_vec128 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = st + 3U;
+  uint32_t *b0 = st32;
+  uint32_t *b1 = st32 + 4U;
+  uint32_t *b2 = st32 + 8U;
+  uint32_t *b3 = st32 + 12U;
+  uint8_t b8[16U] = { 0U };
+  Lib_IntVector_Intrinsics_vec128_store32_le(b8, r0[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = b0;
+    uint8_t *bj = b8 + i * 4U;
+    uint32_t u = load32_le(bj);
+    uint32_t r = u;
+    uint32_t x = r;
+    os[i] = x;);
+  uint8_t b80[16U] = { 0U };
+  Lib_IntVector_Intrinsics_vec128_store32_le(b80, r1[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = b1;
+    uint8_t *bj = b80 + i * 4U;
+    uint32_t u = load32_le(bj);
+    uint32_t r = u;
+    uint32_t x = r;
+    os[i] = x;);
+  uint8_t b81[16U] = { 0U };
+  Lib_IntVector_Intrinsics_vec128_store32_le(b81, r2[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = b2;
+    uint8_t *bj = b81 + i * 4U;
+    uint32_t u = load32_le(bj);
+    uint32_t r = u;
+    uint32_t x = r;
+    os[i] = x;);
+  uint8_t b82[16U] = { 0U };
+  Lib_IntVector_Intrinsics_vec128_store32_le(b82, r3[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = b3;
+    uint8_t *bj = b82 + i * 4U;
+    uint32_t u = load32_le(bj);
+    uint32_t r = u;
+    uint32_t x = r;
+    os[i] = x;);
+}
+
+void
+Hacl_Hash_Blake2s_Simd128_load_state128s_from_state32(
+  Lib_IntVector_Intrinsics_vec128 *st,
+  uint32_t *st32
+)
+{
+  Lib_IntVector_Intrinsics_vec128 *r0 = st;
+  Lib_IntVector_Intrinsics_vec128 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = st + 3U;
+  uint32_t *b0 = st32;
+  uint32_t *b1 = st32 + 4U;
+  uint32_t *b2 = st32 + 8U;
+  uint32_t *b3 = st32 + 12U;
+  r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b0[0U], b0[1U], b0[2U], b0[3U]);
+  r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b1[0U], b1[1U], b1[2U], b1[3U]);
+  r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b2[0U], b2[1U], b2[2U], b2[3U]);
+  r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b3[0U], b3[1U], b3[2U], b3[3U]);
+}
+
+Lib_IntVector_Intrinsics_vec128 *Hacl_Hash_Blake2s_Simd128_malloc_with_key(void)
+{
+  Lib_IntVector_Intrinsics_vec128
+  *buf =
+    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(buf, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  return buf;
+}
+
+/**
+  State allocation function when there is no key
+*/
+Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  Lib_IntVector_Intrinsics_vec128
+  *wv =
+    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(wv, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Lib_IntVector_Intrinsics_vec128
+  *b =
+    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Hacl_Hash_Blake2s_Simd128_block_state_t block_state = { .fst = wv, .snd = b };
+  Hacl_Hash_Blake2s_Simd128_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  Hacl_Hash_Blake2s_Simd128_state_t
+  *p =
+    (Hacl_Hash_Blake2s_Simd128_state_t *)KRML_HOST_MALLOC(sizeof (
+        Hacl_Hash_Blake2s_Simd128_state_t
+      ));
+  p[0U] = s;
+  Hacl_Hash_Blake2s_Simd128_init(block_state.snd, 0U, 32U);
+  return p;
+}
+
+/**
+  Re-initialization function when there is no key
+*/
+void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *state)
+{
+  Hacl_Hash_Blake2s_Simd128_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state;
+  Hacl_Hash_Blake2s_Simd128_init(block_state.snd, 0U, 32U);
+  Hacl_Hash_Blake2s_Simd128_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
+}
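
The vectorized streaming interface mirrors the scalar one name-for-name. A short sketch of reusing a single state across two messages via `reset`; the `digest` and `free` counterparts do not appear in this hunk and are assumed to follow the same naming and signatures as the scalar versions.

#include <stdint.h>
#include "Hacl_Hash_Blake2s_Simd128.h"  /* assumed public header name */

static void example_blake2s_simd128_reuse(void)
{
  uint8_t d1[32U] = { 0U };
  uint8_t d2[32U] = { 0U };
  Hacl_Hash_Blake2s_Simd128_state_t *st = Hacl_Hash_Blake2s_Simd128_malloc();
  /* Error codes from update are elided in this sketch. */
  Hacl_Hash_Blake2s_Simd128_update(st, (uint8_t *)"first message", 13U);
  Hacl_Hash_Blake2s_Simd128_digest(st, d1);  /* assumed: mirrors Hacl_Hash_Blake2s_digest */
  /* reset re-runs init and zeroes the running length, so the state is reusable. */
  Hacl_Hash_Blake2s_Simd128_reset(st);
  Hacl_Hash_Blake2s_Simd128_update(st, (uint8_t *)"second message", 14U);
  Hacl_Hash_Blake2s_Simd128_digest(st, d2);  /* assumed */
  Hacl_Hash_Blake2s_Simd128_free(st);        /* assumed: mirrors Hacl_Hash_Blake2s_free */
}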
+
+/**
+  Update function when there is no key; 0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Hash_Blake2s_Simd128_update(
+  Hacl_Hash_Blake2s_Simd128_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
+)
+{
+  Hacl_Hash_Blake2s_Simd128_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
+  {
+    sz = 64U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)64U);
+  }
+  if (chunk_len <= 64U - sz)
+  {
+    Hacl_Hash_Blake2s_Simd128_state_t s1 = *state;
+    Hacl_Hash_Blake2s_Simd128_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_Simd128_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_Hash_Blake2s_Simd128_state_t s1 = *state;
+    Hacl_Hash_Blake2s_Simd128_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
+      Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, hash, prevlen, buf, nb);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 64U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
+    Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
+    uint32_t nb = data1_len / 64U;
+    Hacl_Hash_Blake2s_Simd128_update_multi(data1_len, wv, hash, total_len1, data1, nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_Simd128_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 64U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Hash_Blake2s_Simd128_state_t s1 = *state;
+    Hacl_Hash_Blake2s_Simd128_block_state_t block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 64U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_Simd128_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2
+        }
+      );
+    Hacl_Hash_Blake2s_Simd128_state_t s10 = *state;
+    Hacl_Hash_Blake2s_Simd128_block_state_t block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
+      Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, hash, prevlen, buf, nb);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 64U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
+    Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
+    uint32_t nb = data1_len / 64U;
+    Hacl_Hash_Blake2s_Simd128_update_multi(data1_len, wv, hash, total_len1, data1, nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_Simd128_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
+
+/**
+  Finish function when there is no key
+*/
+void
+Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8_t *output)
+{
+  Hacl_Hash_Blake2s_Simd128_state_t scrut = *state;
+  Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint32_t r;
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
+  {
+    r = 64U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)64U);
+  }
+  uint8_t *buf_1 = buf_;
+  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
+  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U };
+  Hacl_Hash_Blake2s_Simd128_block_state_t tmp_block_state = { .fst = wv0, .snd = b };
+  Lib_IntVector_Intrinsics_vec128 *src_b = block_state.snd;
+  Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.snd;
+  memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  uint64_t prev_len = total_len - (uint64_t)r;
+  uint32_t ite;
+  if (r % 64U == 0U && r > 0U)
+  {
+    ite = 64U;
+  }
+  else
+  {
+    ite = r % 64U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite;
+  uint8_t *buf_multi = buf_1;
+  Lib_IntVector_Intrinsics_vec128 *wv1 = tmp_block_state.fst;
+  Lib_IntVector_Intrinsics_vec128 *hash0 = tmp_block_state.snd;
+  uint32_t nb = 0U;
+  Hacl_Hash_Blake2s_Simd128_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb);
+  uint64_t prev_len_last = total_len - (uint64_t)r;
+  Lib_IntVector_Intrinsics_vec128 *wv = tmp_block_state.fst;
+  Lib_IntVector_Intrinsics_vec128 *hash = tmp_block_state.snd;
+  Hacl_Hash_Blake2s_Simd128_update_last(r, wv, hash, prev_len_last, r, buf_last);
+  Hacl_Hash_Blake2s_Simd128_finish(32U, output, tmp_block_state.snd);
+}
+
+/**
+  Free state function when there is no key
+*/
+void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state)
+{
+  Hacl_Hash_Blake2s_Simd128_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state;
+  Lib_IntVector_Intrinsics_vec128 *wv = block_state.fst;
+  Lib_IntVector_Intrinsics_vec128 *b = block_state.snd;
+  KRML_ALIGNED_FREE(wv);
+  KRML_ALIGNED_FREE(b);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
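
A minimal usage sketch of the keyless streaming API defined above (malloc, update, digest, free), illustrating the update return code documented earlier; `msg`/`msg_len` and the wrapper name are illustrative, and the header name is assumed to be the public `Hacl_Hash_Blake2s_Simd128.h`:

#include "Hacl_Hash_Blake2s_Simd128.h"   /* assumed public header */

/* Illustrative only: feed one buffer through the streaming API and
   collect the default 32-byte digest. */
static void blake2s128_streaming_example(uint8_t *msg, uint32_t msg_len, uint8_t digest[32U])
{
  Hacl_Hash_Blake2s_Simd128_state_t *st = Hacl_Hash_Blake2s_Simd128_malloc();
  /* Returns Hacl_Streaming_Types_Success (0) unless the accumulated length
     would overflow; a real caller should check the code instead of casting. */
  (void)Hacl_Hash_Blake2s_Simd128_update(st, msg, msg_len);
  Hacl_Hash_Blake2s_Simd128_digest(st, digest);
  Hacl_Hash_Blake2s_Simd128_free(st);
}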
+
+/**
+Write the BLAKE2s digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the digest to be generated, with 1 <= `output_len` <= 32.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
+void
+Hacl_Hash_Blake2s_Simd128_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
+)
+{
+  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U };
+  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b1[4U] KRML_POST_ALIGN(16) = { 0U };
+  Hacl_Hash_Blake2s_Simd128_init(b, key_len, output_len);
+  update(b1, b, key_len, key, input_len, input);
+  Hacl_Hash_Blake2s_Simd128_finish(output_len, output, b);
+  Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec128);
+  Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec128);
+}
+
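As a hedged sketch of the one-shot entry point documented above: with no key (`key_len = 0`) and a full-length digest, a call reduces to the following. The wrapper and buffer names are illustrative; a one-byte dummy key buffer is passed so that `key` is a valid pointer even though it is unused when `key_len` is 0.

/* Illustrative only: keyless, full-length (32-byte) one-shot digest. */
static void blake2s128_oneshot_example(uint8_t *msg, uint32_t msg_len, uint8_t out[32U])
{
  uint8_t key[1U] = { 0U };   /* ignored, since key_len is 0 */
  Hacl_Hash_Blake2s_Simd128_hash_with_key(out, 32U, msg, msg_len, key, 0U);
}
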
diff --git a/src/Hacl_Hash_MD5.c b/src/Hacl_Hash_MD5.c
index 222ac824..ed294839 100644
--- a/src/Hacl_Hash_MD5.c
+++ b/src/Hacl_Hash_MD5.c
@@ -25,37 +25,29 @@
 
 #include "internal/Hacl_Hash_MD5.h"
 
-static uint32_t
-_h0[4U] =
-  { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U };
+static uint32_t _h0[4U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U };
 
 static uint32_t
 _t[64U] =
   {
-    (uint32_t)0xd76aa478U, (uint32_t)0xe8c7b756U, (uint32_t)0x242070dbU, (uint32_t)0xc1bdceeeU,
-    (uint32_t)0xf57c0fafU, (uint32_t)0x4787c62aU, (uint32_t)0xa8304613U, (uint32_t)0xfd469501U,
-    (uint32_t)0x698098d8U, (uint32_t)0x8b44f7afU, (uint32_t)0xffff5bb1U, (uint32_t)0x895cd7beU,
-    (uint32_t)0x6b901122U, (uint32_t)0xfd987193U, (uint32_t)0xa679438eU, (uint32_t)0x49b40821U,
-    (uint32_t)0xf61e2562U, (uint32_t)0xc040b340U, (uint32_t)0x265e5a51U, (uint32_t)0xe9b6c7aaU,
-    (uint32_t)0xd62f105dU, (uint32_t)0x02441453U, (uint32_t)0xd8a1e681U, (uint32_t)0xe7d3fbc8U,
-    (uint32_t)0x21e1cde6U, (uint32_t)0xc33707d6U, (uint32_t)0xf4d50d87U, (uint32_t)0x455a14edU,
-    (uint32_t)0xa9e3e905U, (uint32_t)0xfcefa3f8U, (uint32_t)0x676f02d9U, (uint32_t)0x8d2a4c8aU,
-    (uint32_t)0xfffa3942U, (uint32_t)0x8771f681U, (uint32_t)0x6d9d6122U, (uint32_t)0xfde5380cU,
-    (uint32_t)0xa4beea44U, (uint32_t)0x4bdecfa9U, (uint32_t)0xf6bb4b60U, (uint32_t)0xbebfbc70U,
-    (uint32_t)0x289b7ec6U, (uint32_t)0xeaa127faU, (uint32_t)0xd4ef3085U, (uint32_t)0x4881d05U,
-    (uint32_t)0xd9d4d039U, (uint32_t)0xe6db99e5U, (uint32_t)0x1fa27cf8U, (uint32_t)0xc4ac5665U,
-    (uint32_t)0xf4292244U, (uint32_t)0x432aff97U, (uint32_t)0xab9423a7U, (uint32_t)0xfc93a039U,
-    (uint32_t)0x655b59c3U, (uint32_t)0x8f0ccc92U, (uint32_t)0xffeff47dU, (uint32_t)0x85845dd1U,
-    (uint32_t)0x6fa87e4fU, (uint32_t)0xfe2ce6e0U, (uint32_t)0xa3014314U, (uint32_t)0x4e0811a1U,
-    (uint32_t)0xf7537e82U, (uint32_t)0xbd3af235U, (uint32_t)0x2ad7d2bbU, (uint32_t)0xeb86d391U
+    0xd76aa478U, 0xe8c7b756U, 0x242070dbU, 0xc1bdceeeU, 0xf57c0fafU, 0x4787c62aU, 0xa8304613U,
+    0xfd469501U, 0x698098d8U, 0x8b44f7afU, 0xffff5bb1U, 0x895cd7beU, 0x6b901122U, 0xfd987193U,
+    0xa679438eU, 0x49b40821U, 0xf61e2562U, 0xc040b340U, 0x265e5a51U, 0xe9b6c7aaU, 0xd62f105dU,
+    0x02441453U, 0xd8a1e681U, 0xe7d3fbc8U, 0x21e1cde6U, 0xc33707d6U, 0xf4d50d87U, 0x455a14edU,
+    0xa9e3e905U, 0xfcefa3f8U, 0x676f02d9U, 0x8d2a4c8aU, 0xfffa3942U, 0x8771f681U, 0x6d9d6122U,
+    0xfde5380cU, 0xa4beea44U, 0x4bdecfa9U, 0xf6bb4b60U, 0xbebfbc70U, 0x289b7ec6U, 0xeaa127faU,
+    0xd4ef3085U, 0x4881d05U, 0xd9d4d039U, 0xe6db99e5U, 0x1fa27cf8U, 0xc4ac5665U, 0xf4292244U,
+    0x432aff97U, 0xab9423a7U, 0xfc93a039U, 0x655b59c3U, 0x8f0ccc92U, 0xffeff47dU, 0x85845dd1U,
+    0x6fa87e4fU, 0xfe2ce6e0U, 0xa3014314U, 0x4e0811a1U, 0xf7537e82U, 0xbd3af235U, 0x2ad7d2bbU,
+    0xeb86d391U
   };
 
-void Hacl_Hash_Core_MD5_legacy_init(uint32_t *s)
+void Hacl_Hash_MD5_init(uint32_t *s)
 {
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, s[i] = _h0[i];);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, s[i] = _h0[i];);
 }
 
-static void legacy_update(uint32_t *abcd, uint8_t *x)
+static void update(uint32_t *abcd, uint8_t *x)
 {
   uint32_t aa = abcd[0U];
   uint32_t bb = abcd[1U];
@@ -74,14 +66,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb0
     +
       ((va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0)
-      << (uint32_t)7U
-      | (va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0) >> (uint32_t)25U);
+      << 7U
+      | (va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0) >> 25U);
   abcd[0U] = v;
   uint32_t va0 = abcd[3U];
   uint32_t vb1 = abcd[0U];
   uint32_t vc1 = abcd[1U];
   uint32_t vd1 = abcd[2U];
-  uint8_t *b1 = x + (uint32_t)4U;
+  uint8_t *b1 = x + 4U;
   uint32_t u0 = load32_le(b1);
   uint32_t xk0 = u0;
   uint32_t ti1 = _t[1U];
@@ -90,14 +82,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb1
     +
       ((va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1)
-      << (uint32_t)12U
-      | (va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1) >> (uint32_t)20U);
+      << 12U
+      | (va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1) >> 20U);
   abcd[3U] = v0;
   uint32_t va1 = abcd[2U];
   uint32_t vb2 = abcd[3U];
   uint32_t vc2 = abcd[0U];
   uint32_t vd2 = abcd[1U];
-  uint8_t *b2 = x + (uint32_t)8U;
+  uint8_t *b2 = x + 8U;
   uint32_t u1 = load32_le(b2);
   uint32_t xk1 = u1;
   uint32_t ti2 = _t[2U];
@@ -106,14 +98,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb2
     +
       ((va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2)
-      << (uint32_t)17U
-      | (va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2) >> (uint32_t)15U);
+      << 17U
+      | (va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2) >> 15U);
   abcd[2U] = v1;
   uint32_t va2 = abcd[1U];
   uint32_t vb3 = abcd[2U];
   uint32_t vc3 = abcd[3U];
   uint32_t vd3 = abcd[0U];
-  uint8_t *b3 = x + (uint32_t)12U;
+  uint8_t *b3 = x + 12U;
   uint32_t u2 = load32_le(b3);
   uint32_t xk2 = u2;
   uint32_t ti3 = _t[3U];
@@ -122,14 +114,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb3
     +
       ((va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3)
-      << (uint32_t)22U
-      | (va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3) >> (uint32_t)10U);
+      << 22U
+      | (va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3) >> 10U);
   abcd[1U] = v2;
   uint32_t va3 = abcd[0U];
   uint32_t vb4 = abcd[1U];
   uint32_t vc4 = abcd[2U];
   uint32_t vd4 = abcd[3U];
-  uint8_t *b4 = x + (uint32_t)16U;
+  uint8_t *b4 = x + 16U;
   uint32_t u3 = load32_le(b4);
   uint32_t xk3 = u3;
   uint32_t ti4 = _t[4U];
@@ -138,14 +130,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb4
     +
       ((va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4)
-      << (uint32_t)7U
-      | (va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4) >> (uint32_t)25U);
+      << 7U
+      | (va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4) >> 25U);
   abcd[0U] = v3;
   uint32_t va4 = abcd[3U];
   uint32_t vb5 = abcd[0U];
   uint32_t vc5 = abcd[1U];
   uint32_t vd5 = abcd[2U];
-  uint8_t *b5 = x + (uint32_t)20U;
+  uint8_t *b5 = x + 20U;
   uint32_t u4 = load32_le(b5);
   uint32_t xk4 = u4;
   uint32_t ti5 = _t[5U];
@@ -154,14 +146,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb5
     +
       ((va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5)
-      << (uint32_t)12U
-      | (va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5) >> (uint32_t)20U);
+      << 12U
+      | (va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5) >> 20U);
   abcd[3U] = v4;
   uint32_t va5 = abcd[2U];
   uint32_t vb6 = abcd[3U];
   uint32_t vc6 = abcd[0U];
   uint32_t vd6 = abcd[1U];
-  uint8_t *b6 = x + (uint32_t)24U;
+  uint8_t *b6 = x + 24U;
   uint32_t u5 = load32_le(b6);
   uint32_t xk5 = u5;
   uint32_t ti6 = _t[6U];
@@ -170,14 +162,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb6
     +
       ((va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6)
-      << (uint32_t)17U
-      | (va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6) >> (uint32_t)15U);
+      << 17U
+      | (va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6) >> 15U);
   abcd[2U] = v5;
   uint32_t va6 = abcd[1U];
   uint32_t vb7 = abcd[2U];
   uint32_t vc7 = abcd[3U];
   uint32_t vd7 = abcd[0U];
-  uint8_t *b7 = x + (uint32_t)28U;
+  uint8_t *b7 = x + 28U;
   uint32_t u6 = load32_le(b7);
   uint32_t xk6 = u6;
   uint32_t ti7 = _t[7U];
@@ -186,14 +178,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb7
     +
       ((va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7)
-      << (uint32_t)22U
-      | (va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7) >> (uint32_t)10U);
+      << 22U
+      | (va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7) >> 10U);
   abcd[1U] = v6;
   uint32_t va7 = abcd[0U];
   uint32_t vb8 = abcd[1U];
   uint32_t vc8 = abcd[2U];
   uint32_t vd8 = abcd[3U];
-  uint8_t *b8 = x + (uint32_t)32U;
+  uint8_t *b8 = x + 32U;
   uint32_t u7 = load32_le(b8);
   uint32_t xk7 = u7;
   uint32_t ti8 = _t[8U];
@@ -202,14 +194,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb8
     +
       ((va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8)
-      << (uint32_t)7U
-      | (va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8) >> (uint32_t)25U);
+      << 7U
+      | (va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8) >> 25U);
   abcd[0U] = v7;
   uint32_t va8 = abcd[3U];
   uint32_t vb9 = abcd[0U];
   uint32_t vc9 = abcd[1U];
   uint32_t vd9 = abcd[2U];
-  uint8_t *b9 = x + (uint32_t)36U;
+  uint8_t *b9 = x + 36U;
   uint32_t u8 = load32_le(b9);
   uint32_t xk8 = u8;
   uint32_t ti9 = _t[9U];
@@ -218,14 +210,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb9
     +
       ((va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9)
-      << (uint32_t)12U
-      | (va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9) >> (uint32_t)20U);
+      << 12U
+      | (va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9) >> 20U);
   abcd[3U] = v8;
   uint32_t va9 = abcd[2U];
   uint32_t vb10 = abcd[3U];
   uint32_t vc10 = abcd[0U];
   uint32_t vd10 = abcd[1U];
-  uint8_t *b10 = x + (uint32_t)40U;
+  uint8_t *b10 = x + 40U;
   uint32_t u9 = load32_le(b10);
   uint32_t xk9 = u9;
   uint32_t ti10 = _t[10U];
@@ -234,14 +226,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb10
     +
       ((va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10)
-      << (uint32_t)17U
-      | (va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10) >> (uint32_t)15U);
+      << 17U
+      | (va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10) >> 15U);
   abcd[2U] = v9;
   uint32_t va10 = abcd[1U];
   uint32_t vb11 = abcd[2U];
   uint32_t vc11 = abcd[3U];
   uint32_t vd11 = abcd[0U];
-  uint8_t *b11 = x + (uint32_t)44U;
+  uint8_t *b11 = x + 44U;
   uint32_t u10 = load32_le(b11);
   uint32_t xk10 = u10;
   uint32_t ti11 = _t[11U];
@@ -250,14 +242,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb11
     +
       ((va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11)
-      << (uint32_t)22U
-      | (va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11) >> (uint32_t)10U);
+      << 22U
+      | (va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11) >> 10U);
   abcd[1U] = v10;
   uint32_t va11 = abcd[0U];
   uint32_t vb12 = abcd[1U];
   uint32_t vc12 = abcd[2U];
   uint32_t vd12 = abcd[3U];
-  uint8_t *b12 = x + (uint32_t)48U;
+  uint8_t *b12 = x + 48U;
   uint32_t u11 = load32_le(b12);
   uint32_t xk11 = u11;
   uint32_t ti12 = _t[12U];
@@ -266,14 +258,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb12
     +
       ((va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12)
-      << (uint32_t)7U
-      | (va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12) >> (uint32_t)25U);
+      << 7U
+      | (va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12) >> 25U);
   abcd[0U] = v11;
   uint32_t va12 = abcd[3U];
   uint32_t vb13 = abcd[0U];
   uint32_t vc13 = abcd[1U];
   uint32_t vd13 = abcd[2U];
-  uint8_t *b13 = x + (uint32_t)52U;
+  uint8_t *b13 = x + 52U;
   uint32_t u12 = load32_le(b13);
   uint32_t xk12 = u12;
   uint32_t ti13 = _t[13U];
@@ -282,14 +274,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb13
     +
       ((va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13)
-      << (uint32_t)12U
-      | (va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13) >> (uint32_t)20U);
+      << 12U
+      | (va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13) >> 20U);
   abcd[3U] = v12;
   uint32_t va13 = abcd[2U];
   uint32_t vb14 = abcd[3U];
   uint32_t vc14 = abcd[0U];
   uint32_t vd14 = abcd[1U];
-  uint8_t *b14 = x + (uint32_t)56U;
+  uint8_t *b14 = x + 56U;
   uint32_t u13 = load32_le(b14);
   uint32_t xk13 = u13;
   uint32_t ti14 = _t[14U];
@@ -298,14 +290,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb14
     +
       ((va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14)
-      << (uint32_t)17U
-      | (va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14) >> (uint32_t)15U);
+      << 17U
+      | (va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14) >> 15U);
   abcd[2U] = v13;
   uint32_t va14 = abcd[1U];
   uint32_t vb15 = abcd[2U];
   uint32_t vc15 = abcd[3U];
   uint32_t vd15 = abcd[0U];
-  uint8_t *b15 = x + (uint32_t)60U;
+  uint8_t *b15 = x + 60U;
   uint32_t u14 = load32_le(b15);
   uint32_t xk14 = u14;
   uint32_t ti15 = _t[15U];
@@ -314,14 +306,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb15
     +
       ((va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15)
-      << (uint32_t)22U
-      | (va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15) >> (uint32_t)10U);
+      << 22U
+      | (va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15) >> 10U);
   abcd[1U] = v14;
   uint32_t va15 = abcd[0U];
   uint32_t vb16 = abcd[1U];
   uint32_t vc16 = abcd[2U];
   uint32_t vd16 = abcd[3U];
-  uint8_t *b16 = x + (uint32_t)4U;
+  uint8_t *b16 = x + 4U;
   uint32_t u15 = load32_le(b16);
   uint32_t xk15 = u15;
   uint32_t ti16 = _t[16U];
@@ -330,14 +322,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb16
     +
       ((va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16)
-      << (uint32_t)5U
-      | (va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16) >> (uint32_t)27U);
+      << 5U
+      | (va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16) >> 27U);
   abcd[0U] = v15;
   uint32_t va16 = abcd[3U];
   uint32_t vb17 = abcd[0U];
   uint32_t vc17 = abcd[1U];
   uint32_t vd17 = abcd[2U];
-  uint8_t *b17 = x + (uint32_t)24U;
+  uint8_t *b17 = x + 24U;
   uint32_t u16 = load32_le(b17);
   uint32_t xk16 = u16;
   uint32_t ti17 = _t[17U];
@@ -346,14 +338,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb17
     +
       ((va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17)
-      << (uint32_t)9U
-      | (va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17) >> (uint32_t)23U);
+      << 9U
+      | (va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17) >> 23U);
   abcd[3U] = v16;
   uint32_t va17 = abcd[2U];
   uint32_t vb18 = abcd[3U];
   uint32_t vc18 = abcd[0U];
   uint32_t vd18 = abcd[1U];
-  uint8_t *b18 = x + (uint32_t)44U;
+  uint8_t *b18 = x + 44U;
   uint32_t u17 = load32_le(b18);
   uint32_t xk17 = u17;
   uint32_t ti18 = _t[18U];
@@ -362,8 +354,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb18
     +
       ((va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18)
-      << (uint32_t)14U
-      | (va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18) >> (uint32_t)18U);
+      << 14U
+      | (va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18) >> 18U);
   abcd[2U] = v17;
   uint32_t va18 = abcd[1U];
   uint32_t vb19 = abcd[2U];
@@ -378,14 +370,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb19
     +
       ((va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19)
-      << (uint32_t)20U
-      | (va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19) >> (uint32_t)12U);
+      << 20U
+      | (va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19) >> 12U);
   abcd[1U] = v18;
   uint32_t va19 = abcd[0U];
   uint32_t vb20 = abcd[1U];
   uint32_t vc20 = abcd[2U];
   uint32_t vd20 = abcd[3U];
-  uint8_t *b20 = x + (uint32_t)20U;
+  uint8_t *b20 = x + 20U;
   uint32_t u19 = load32_le(b20);
   uint32_t xk19 = u19;
   uint32_t ti20 = _t[20U];
@@ -394,14 +386,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb20
     +
       ((va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20)
-      << (uint32_t)5U
-      | (va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20) >> (uint32_t)27U);
+      << 5U
+      | (va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20) >> 27U);
   abcd[0U] = v19;
   uint32_t va20 = abcd[3U];
   uint32_t vb21 = abcd[0U];
   uint32_t vc21 = abcd[1U];
   uint32_t vd21 = abcd[2U];
-  uint8_t *b21 = x + (uint32_t)40U;
+  uint8_t *b21 = x + 40U;
   uint32_t u20 = load32_le(b21);
   uint32_t xk20 = u20;
   uint32_t ti21 = _t[21U];
@@ -410,14 +402,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb21
     +
       ((va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21)
-      << (uint32_t)9U
-      | (va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21) >> (uint32_t)23U);
+      << 9U
+      | (va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21) >> 23U);
   abcd[3U] = v20;
   uint32_t va21 = abcd[2U];
   uint32_t vb22 = abcd[3U];
   uint32_t vc22 = abcd[0U];
   uint32_t vd22 = abcd[1U];
-  uint8_t *b22 = x + (uint32_t)60U;
+  uint8_t *b22 = x + 60U;
   uint32_t u21 = load32_le(b22);
   uint32_t xk21 = u21;
   uint32_t ti22 = _t[22U];
@@ -426,14 +418,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb22
     +
       ((va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22)
-      << (uint32_t)14U
-      | (va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22) >> (uint32_t)18U);
+      << 14U
+      | (va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22) >> 18U);
   abcd[2U] = v21;
   uint32_t va22 = abcd[1U];
   uint32_t vb23 = abcd[2U];
   uint32_t vc23 = abcd[3U];
   uint32_t vd23 = abcd[0U];
-  uint8_t *b23 = x + (uint32_t)16U;
+  uint8_t *b23 = x + 16U;
   uint32_t u22 = load32_le(b23);
   uint32_t xk22 = u22;
   uint32_t ti23 = _t[23U];
@@ -442,14 +434,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb23
     +
       ((va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23)
-      << (uint32_t)20U
-      | (va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23) >> (uint32_t)12U);
+      << 20U
+      | (va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23) >> 12U);
   abcd[1U] = v22;
   uint32_t va23 = abcd[0U];
   uint32_t vb24 = abcd[1U];
   uint32_t vc24 = abcd[2U];
   uint32_t vd24 = abcd[3U];
-  uint8_t *b24 = x + (uint32_t)36U;
+  uint8_t *b24 = x + 36U;
   uint32_t u23 = load32_le(b24);
   uint32_t xk23 = u23;
   uint32_t ti24 = _t[24U];
@@ -458,14 +450,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb24
     +
       ((va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24)
-      << (uint32_t)5U
-      | (va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24) >> (uint32_t)27U);
+      << 5U
+      | (va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24) >> 27U);
   abcd[0U] = v23;
   uint32_t va24 = abcd[3U];
   uint32_t vb25 = abcd[0U];
   uint32_t vc25 = abcd[1U];
   uint32_t vd25 = abcd[2U];
-  uint8_t *b25 = x + (uint32_t)56U;
+  uint8_t *b25 = x + 56U;
   uint32_t u24 = load32_le(b25);
   uint32_t xk24 = u24;
   uint32_t ti25 = _t[25U];
@@ -474,14 +466,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb25
     +
       ((va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25)
-      << (uint32_t)9U
-      | (va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25) >> (uint32_t)23U);
+      << 9U
+      | (va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25) >> 23U);
   abcd[3U] = v24;
   uint32_t va25 = abcd[2U];
   uint32_t vb26 = abcd[3U];
   uint32_t vc26 = abcd[0U];
   uint32_t vd26 = abcd[1U];
-  uint8_t *b26 = x + (uint32_t)12U;
+  uint8_t *b26 = x + 12U;
   uint32_t u25 = load32_le(b26);
   uint32_t xk25 = u25;
   uint32_t ti26 = _t[26U];
@@ -490,14 +482,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb26
     +
       ((va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26)
-      << (uint32_t)14U
-      | (va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26) >> (uint32_t)18U);
+      << 14U
+      | (va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26) >> 18U);
   abcd[2U] = v25;
   uint32_t va26 = abcd[1U];
   uint32_t vb27 = abcd[2U];
   uint32_t vc27 = abcd[3U];
   uint32_t vd27 = abcd[0U];
-  uint8_t *b27 = x + (uint32_t)32U;
+  uint8_t *b27 = x + 32U;
   uint32_t u26 = load32_le(b27);
   uint32_t xk26 = u26;
   uint32_t ti27 = _t[27U];
@@ -506,14 +498,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb27
     +
       ((va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27)
-      << (uint32_t)20U
-      | (va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27) >> (uint32_t)12U);
+      << 20U
+      | (va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27) >> 12U);
   abcd[1U] = v26;
   uint32_t va27 = abcd[0U];
   uint32_t vb28 = abcd[1U];
   uint32_t vc28 = abcd[2U];
   uint32_t vd28 = abcd[3U];
-  uint8_t *b28 = x + (uint32_t)52U;
+  uint8_t *b28 = x + 52U;
   uint32_t u27 = load32_le(b28);
   uint32_t xk27 = u27;
   uint32_t ti28 = _t[28U];
@@ -522,14 +514,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb28
     +
       ((va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28)
-      << (uint32_t)5U
-      | (va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28) >> (uint32_t)27U);
+      << 5U
+      | (va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28) >> 27U);
   abcd[0U] = v27;
   uint32_t va28 = abcd[3U];
   uint32_t vb29 = abcd[0U];
   uint32_t vc29 = abcd[1U];
   uint32_t vd29 = abcd[2U];
-  uint8_t *b29 = x + (uint32_t)8U;
+  uint8_t *b29 = x + 8U;
   uint32_t u28 = load32_le(b29);
   uint32_t xk28 = u28;
   uint32_t ti29 = _t[29U];
@@ -538,14 +530,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb29
     +
       ((va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29)
-      << (uint32_t)9U
-      | (va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29) >> (uint32_t)23U);
+      << 9U
+      | (va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29) >> 23U);
   abcd[3U] = v28;
   uint32_t va29 = abcd[2U];
   uint32_t vb30 = abcd[3U];
   uint32_t vc30 = abcd[0U];
   uint32_t vd30 = abcd[1U];
-  uint8_t *b30 = x + (uint32_t)28U;
+  uint8_t *b30 = x + 28U;
   uint32_t u29 = load32_le(b30);
   uint32_t xk29 = u29;
   uint32_t ti30 = _t[30U];
@@ -554,14 +546,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb30
     +
       ((va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30)
-      << (uint32_t)14U
-      | (va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30) >> (uint32_t)18U);
+      << 14U
+      | (va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30) >> 18U);
   abcd[2U] = v29;
   uint32_t va30 = abcd[1U];
   uint32_t vb31 = abcd[2U];
   uint32_t vc31 = abcd[3U];
   uint32_t vd31 = abcd[0U];
-  uint8_t *b31 = x + (uint32_t)48U;
+  uint8_t *b31 = x + 48U;
   uint32_t u30 = load32_le(b31);
   uint32_t xk30 = u30;
   uint32_t ti31 = _t[31U];
@@ -570,14 +562,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb31
     +
       ((va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31)
-      << (uint32_t)20U
-      | (va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31) >> (uint32_t)12U);
+      << 20U
+      | (va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31) >> 12U);
   abcd[1U] = v30;
   uint32_t va31 = abcd[0U];
   uint32_t vb32 = abcd[1U];
   uint32_t vc32 = abcd[2U];
   uint32_t vd32 = abcd[3U];
-  uint8_t *b32 = x + (uint32_t)20U;
+  uint8_t *b32 = x + 20U;
   uint32_t u31 = load32_le(b32);
   uint32_t xk31 = u31;
   uint32_t ti32 = _t[32U];
@@ -586,14 +578,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb32
     +
       ((va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32)
-      << (uint32_t)4U
-      | (va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32) >> (uint32_t)28U);
+      << 4U
+      | (va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32) >> 28U);
   abcd[0U] = v31;
   uint32_t va32 = abcd[3U];
   uint32_t vb33 = abcd[0U];
   uint32_t vc33 = abcd[1U];
   uint32_t vd33 = abcd[2U];
-  uint8_t *b33 = x + (uint32_t)32U;
+  uint8_t *b33 = x + 32U;
   uint32_t u32 = load32_le(b33);
   uint32_t xk32 = u32;
   uint32_t ti33 = _t[33U];
@@ -602,14 +594,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb33
     +
       ((va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33)
-      << (uint32_t)11U
-      | (va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33) >> (uint32_t)21U);
+      << 11U
+      | (va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33) >> 21U);
   abcd[3U] = v32;
   uint32_t va33 = abcd[2U];
   uint32_t vb34 = abcd[3U];
   uint32_t vc34 = abcd[0U];
   uint32_t vd34 = abcd[1U];
-  uint8_t *b34 = x + (uint32_t)44U;
+  uint8_t *b34 = x + 44U;
   uint32_t u33 = load32_le(b34);
   uint32_t xk33 = u33;
   uint32_t ti34 = _t[34U];
@@ -618,14 +610,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb34
     +
       ((va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34)
-      << (uint32_t)16U
-      | (va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34) >> (uint32_t)16U);
+      << 16U
+      | (va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34) >> 16U);
   abcd[2U] = v33;
   uint32_t va34 = abcd[1U];
   uint32_t vb35 = abcd[2U];
   uint32_t vc35 = abcd[3U];
   uint32_t vd35 = abcd[0U];
-  uint8_t *b35 = x + (uint32_t)56U;
+  uint8_t *b35 = x + 56U;
   uint32_t u34 = load32_le(b35);
   uint32_t xk34 = u34;
   uint32_t ti35 = _t[35U];
@@ -634,14 +626,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb35
     +
       ((va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35)
-      << (uint32_t)23U
-      | (va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35) >> (uint32_t)9U);
+      << 23U
+      | (va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35) >> 9U);
   abcd[1U] = v34;
   uint32_t va35 = abcd[0U];
   uint32_t vb36 = abcd[1U];
   uint32_t vc36 = abcd[2U];
   uint32_t vd36 = abcd[3U];
-  uint8_t *b36 = x + (uint32_t)4U;
+  uint8_t *b36 = x + 4U;
   uint32_t u35 = load32_le(b36);
   uint32_t xk35 = u35;
   uint32_t ti36 = _t[36U];
@@ -650,14 +642,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb36
     +
       ((va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36)
-      << (uint32_t)4U
-      | (va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36) >> (uint32_t)28U);
+      << 4U
+      | (va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36) >> 28U);
   abcd[0U] = v35;
   uint32_t va36 = abcd[3U];
   uint32_t vb37 = abcd[0U];
   uint32_t vc37 = abcd[1U];
   uint32_t vd37 = abcd[2U];
-  uint8_t *b37 = x + (uint32_t)16U;
+  uint8_t *b37 = x + 16U;
   uint32_t u36 = load32_le(b37);
   uint32_t xk36 = u36;
   uint32_t ti37 = _t[37U];
@@ -666,14 +658,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb37
     +
       ((va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37)
-      << (uint32_t)11U
-      | (va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37) >> (uint32_t)21U);
+      << 11U
+      | (va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37) >> 21U);
   abcd[3U] = v36;
   uint32_t va37 = abcd[2U];
   uint32_t vb38 = abcd[3U];
   uint32_t vc38 = abcd[0U];
   uint32_t vd38 = abcd[1U];
-  uint8_t *b38 = x + (uint32_t)28U;
+  uint8_t *b38 = x + 28U;
   uint32_t u37 = load32_le(b38);
   uint32_t xk37 = u37;
   uint32_t ti38 = _t[38U];
@@ -682,14 +674,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb38
     +
       ((va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38)
-      << (uint32_t)16U
-      | (va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38) >> (uint32_t)16U);
+      << 16U
+      | (va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38) >> 16U);
   abcd[2U] = v37;
   uint32_t va38 = abcd[1U];
   uint32_t vb39 = abcd[2U];
   uint32_t vc39 = abcd[3U];
   uint32_t vd39 = abcd[0U];
-  uint8_t *b39 = x + (uint32_t)40U;
+  uint8_t *b39 = x + 40U;
   uint32_t u38 = load32_le(b39);
   uint32_t xk38 = u38;
   uint32_t ti39 = _t[39U];
@@ -698,14 +690,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb39
     +
       ((va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39)
-      << (uint32_t)23U
-      | (va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39) >> (uint32_t)9U);
+      << 23U
+      | (va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39) >> 9U);
   abcd[1U] = v38;
   uint32_t va39 = abcd[0U];
   uint32_t vb40 = abcd[1U];
   uint32_t vc40 = abcd[2U];
   uint32_t vd40 = abcd[3U];
-  uint8_t *b40 = x + (uint32_t)52U;
+  uint8_t *b40 = x + 52U;
   uint32_t u39 = load32_le(b40);
   uint32_t xk39 = u39;
   uint32_t ti40 = _t[40U];
@@ -714,8 +706,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb40
     +
       ((va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40)
-      << (uint32_t)4U
-      | (va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40) >> (uint32_t)28U);
+      << 4U
+      | (va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40) >> 28U);
   abcd[0U] = v39;
   uint32_t va40 = abcd[3U];
   uint32_t vb41 = abcd[0U];
@@ -730,14 +722,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb41
     +
       ((va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41)
-      << (uint32_t)11U
-      | (va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41) >> (uint32_t)21U);
+      << 11U
+      | (va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41) >> 21U);
   abcd[3U] = v40;
   uint32_t va41 = abcd[2U];
   uint32_t vb42 = abcd[3U];
   uint32_t vc42 = abcd[0U];
   uint32_t vd42 = abcd[1U];
-  uint8_t *b42 = x + (uint32_t)12U;
+  uint8_t *b42 = x + 12U;
   uint32_t u41 = load32_le(b42);
   uint32_t xk41 = u41;
   uint32_t ti42 = _t[42U];
@@ -746,14 +738,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb42
     +
       ((va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42)
-      << (uint32_t)16U
-      | (va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42) >> (uint32_t)16U);
+      << 16U
+      | (va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42) >> 16U);
   abcd[2U] = v41;
   uint32_t va42 = abcd[1U];
   uint32_t vb43 = abcd[2U];
   uint32_t vc43 = abcd[3U];
   uint32_t vd43 = abcd[0U];
-  uint8_t *b43 = x + (uint32_t)24U;
+  uint8_t *b43 = x + 24U;
   uint32_t u42 = load32_le(b43);
   uint32_t xk42 = u42;
   uint32_t ti43 = _t[43U];
@@ -762,14 +754,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb43
     +
       ((va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43)
-      << (uint32_t)23U
-      | (va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43) >> (uint32_t)9U);
+      << 23U
+      | (va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43) >> 9U);
   abcd[1U] = v42;
   uint32_t va43 = abcd[0U];
   uint32_t vb44 = abcd[1U];
   uint32_t vc44 = abcd[2U];
   uint32_t vd44 = abcd[3U];
-  uint8_t *b44 = x + (uint32_t)36U;
+  uint8_t *b44 = x + 36U;
   uint32_t u43 = load32_le(b44);
   uint32_t xk43 = u43;
   uint32_t ti44 = _t[44U];
@@ -778,14 +770,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb44
     +
       ((va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44)
-      << (uint32_t)4U
-      | (va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44) >> (uint32_t)28U);
+      << 4U
+      | (va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44) >> 28U);
   abcd[0U] = v43;
   uint32_t va44 = abcd[3U];
   uint32_t vb45 = abcd[0U];
   uint32_t vc45 = abcd[1U];
   uint32_t vd45 = abcd[2U];
-  uint8_t *b45 = x + (uint32_t)48U;
+  uint8_t *b45 = x + 48U;
   uint32_t u44 = load32_le(b45);
   uint32_t xk44 = u44;
   uint32_t ti45 = _t[45U];
@@ -794,14 +786,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb45
     +
       ((va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45)
-      << (uint32_t)11U
-      | (va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45) >> (uint32_t)21U);
+      << 11U
+      | (va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45) >> 21U);
   abcd[3U] = v44;
   uint32_t va45 = abcd[2U];
   uint32_t vb46 = abcd[3U];
   uint32_t vc46 = abcd[0U];
   uint32_t vd46 = abcd[1U];
-  uint8_t *b46 = x + (uint32_t)60U;
+  uint8_t *b46 = x + 60U;
   uint32_t u45 = load32_le(b46);
   uint32_t xk45 = u45;
   uint32_t ti46 = _t[46U];
@@ -810,14 +802,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb46
     +
       ((va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46)
-      << (uint32_t)16U
-      | (va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46) >> (uint32_t)16U);
+      << 16U
+      | (va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46) >> 16U);
   abcd[2U] = v45;
   uint32_t va46 = abcd[1U];
   uint32_t vb47 = abcd[2U];
   uint32_t vc47 = abcd[3U];
   uint32_t vd47 = abcd[0U];
-  uint8_t *b47 = x + (uint32_t)8U;
+  uint8_t *b47 = x + 8U;
   uint32_t u46 = load32_le(b47);
   uint32_t xk46 = u46;
   uint32_t ti47 = _t[47U];
@@ -826,8 +818,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb47
     +
       ((va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47)
-      << (uint32_t)23U
-      | (va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47) >> (uint32_t)9U);
+      << 23U
+      | (va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47) >> 9U);
   abcd[1U] = v46;
   uint32_t va47 = abcd[0U];
   uint32_t vb48 = abcd[1U];
@@ -842,14 +834,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb48
     +
       ((va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48)
-      << (uint32_t)6U
-      | (va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48) >> (uint32_t)26U);
+      << 6U
+      | (va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48) >> 26U);
   abcd[0U] = v47;
   uint32_t va48 = abcd[3U];
   uint32_t vb49 = abcd[0U];
   uint32_t vc49 = abcd[1U];
   uint32_t vd49 = abcd[2U];
-  uint8_t *b49 = x + (uint32_t)28U;
+  uint8_t *b49 = x + 28U;
   uint32_t u48 = load32_le(b49);
   uint32_t xk48 = u48;
   uint32_t ti49 = _t[49U];
@@ -858,14 +850,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb49
     +
       ((va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49)
-      << (uint32_t)10U
-      | (va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49) >> (uint32_t)22U);
+      << 10U
+      | (va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49) >> 22U);
   abcd[3U] = v48;
   uint32_t va49 = abcd[2U];
   uint32_t vb50 = abcd[3U];
   uint32_t vc50 = abcd[0U];
   uint32_t vd50 = abcd[1U];
-  uint8_t *b50 = x + (uint32_t)56U;
+  uint8_t *b50 = x + 56U;
   uint32_t u49 = load32_le(b50);
   uint32_t xk49 = u49;
   uint32_t ti50 = _t[50U];
@@ -874,14 +866,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb50
     +
       ((va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50)
-      << (uint32_t)15U
-      | (va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50) >> (uint32_t)17U);
+      << 15U
+      | (va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50) >> 17U);
   abcd[2U] = v49;
   uint32_t va50 = abcd[1U];
   uint32_t vb51 = abcd[2U];
   uint32_t vc51 = abcd[3U];
   uint32_t vd51 = abcd[0U];
-  uint8_t *b51 = x + (uint32_t)20U;
+  uint8_t *b51 = x + 20U;
   uint32_t u50 = load32_le(b51);
   uint32_t xk50 = u50;
   uint32_t ti51 = _t[51U];
@@ -890,14 +882,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb51
     +
       ((va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51)
-      << (uint32_t)21U
-      | (va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51) >> (uint32_t)11U);
+      << 21U
+      | (va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51) >> 11U);
   abcd[1U] = v50;
   uint32_t va51 = abcd[0U];
   uint32_t vb52 = abcd[1U];
   uint32_t vc52 = abcd[2U];
   uint32_t vd52 = abcd[3U];
-  uint8_t *b52 = x + (uint32_t)48U;
+  uint8_t *b52 = x + 48U;
   uint32_t u51 = load32_le(b52);
   uint32_t xk51 = u51;
   uint32_t ti52 = _t[52U];
@@ -906,14 +898,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb52
     +
       ((va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52)
-      << (uint32_t)6U
-      | (va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52) >> (uint32_t)26U);
+      << 6U
+      | (va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52) >> 26U);
   abcd[0U] = v51;
   uint32_t va52 = abcd[3U];
   uint32_t vb53 = abcd[0U];
   uint32_t vc53 = abcd[1U];
   uint32_t vd53 = abcd[2U];
-  uint8_t *b53 = x + (uint32_t)12U;
+  uint8_t *b53 = x + 12U;
   uint32_t u52 = load32_le(b53);
   uint32_t xk52 = u52;
   uint32_t ti53 = _t[53U];
@@ -922,14 +914,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb53
     +
       ((va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53)
-      << (uint32_t)10U
-      | (va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53) >> (uint32_t)22U);
+      << 10U
+      | (va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53) >> 22U);
   abcd[3U] = v52;
   uint32_t va53 = abcd[2U];
   uint32_t vb54 = abcd[3U];
   uint32_t vc54 = abcd[0U];
   uint32_t vd54 = abcd[1U];
-  uint8_t *b54 = x + (uint32_t)40U;
+  uint8_t *b54 = x + 40U;
   uint32_t u53 = load32_le(b54);
   uint32_t xk53 = u53;
   uint32_t ti54 = _t[54U];
@@ -938,14 +930,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb54
     +
       ((va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54)
-      << (uint32_t)15U
-      | (va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54) >> (uint32_t)17U);
+      << 15U
+      | (va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54) >> 17U);
   abcd[2U] = v53;
   uint32_t va54 = abcd[1U];
   uint32_t vb55 = abcd[2U];
   uint32_t vc55 = abcd[3U];
   uint32_t vd55 = abcd[0U];
-  uint8_t *b55 = x + (uint32_t)4U;
+  uint8_t *b55 = x + 4U;
   uint32_t u54 = load32_le(b55);
   uint32_t xk54 = u54;
   uint32_t ti55 = _t[55U];
@@ -954,14 +946,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb55
     +
       ((va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55)
-      << (uint32_t)21U
-      | (va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55) >> (uint32_t)11U);
+      << 21U
+      | (va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55) >> 11U);
   abcd[1U] = v54;
   uint32_t va55 = abcd[0U];
   uint32_t vb56 = abcd[1U];
   uint32_t vc56 = abcd[2U];
   uint32_t vd56 = abcd[3U];
-  uint8_t *b56 = x + (uint32_t)32U;
+  uint8_t *b56 = x + 32U;
   uint32_t u55 = load32_le(b56);
   uint32_t xk55 = u55;
   uint32_t ti56 = _t[56U];
@@ -970,14 +962,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb56
     +
       ((va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56)
-      << (uint32_t)6U
-      | (va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56) >> (uint32_t)26U);
+      << 6U
+      | (va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56) >> 26U);
   abcd[0U] = v55;
   uint32_t va56 = abcd[3U];
   uint32_t vb57 = abcd[0U];
   uint32_t vc57 = abcd[1U];
   uint32_t vd57 = abcd[2U];
-  uint8_t *b57 = x + (uint32_t)60U;
+  uint8_t *b57 = x + 60U;
   uint32_t u56 = load32_le(b57);
   uint32_t xk56 = u56;
   uint32_t ti57 = _t[57U];
@@ -986,14 +978,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb57
     +
       ((va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57)
-      << (uint32_t)10U
-      | (va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57) >> (uint32_t)22U);
+      << 10U
+      | (va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57) >> 22U);
   abcd[3U] = v56;
   uint32_t va57 = abcd[2U];
   uint32_t vb58 = abcd[3U];
   uint32_t vc58 = abcd[0U];
   uint32_t vd58 = abcd[1U];
-  uint8_t *b58 = x + (uint32_t)24U;
+  uint8_t *b58 = x + 24U;
   uint32_t u57 = load32_le(b58);
   uint32_t xk57 = u57;
   uint32_t ti58 = _t[58U];
@@ -1002,14 +994,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb58
     +
       ((va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58)
-      << (uint32_t)15U
-      | (va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58) >> (uint32_t)17U);
+      << 15U
+      | (va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58) >> 17U);
   abcd[2U] = v57;
   uint32_t va58 = abcd[1U];
   uint32_t vb59 = abcd[2U];
   uint32_t vc59 = abcd[3U];
   uint32_t vd59 = abcd[0U];
-  uint8_t *b59 = x + (uint32_t)52U;
+  uint8_t *b59 = x + 52U;
   uint32_t u58 = load32_le(b59);
   uint32_t xk58 = u58;
   uint32_t ti59 = _t[59U];
@@ -1018,14 +1010,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb59
     +
       ((va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59)
-      << (uint32_t)21U
-      | (va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59) >> (uint32_t)11U);
+      << 21U
+      | (va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59) >> 11U);
   abcd[1U] = v58;
   uint32_t va59 = abcd[0U];
   uint32_t vb60 = abcd[1U];
   uint32_t vc60 = abcd[2U];
   uint32_t vd60 = abcd[3U];
-  uint8_t *b60 = x + (uint32_t)16U;
+  uint8_t *b60 = x + 16U;
   uint32_t u59 = load32_le(b60);
   uint32_t xk59 = u59;
   uint32_t ti60 = _t[60U];
@@ -1034,14 +1026,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb60
     +
       ((va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60)
-      << (uint32_t)6U
-      | (va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60) >> (uint32_t)26U);
+      << 6U
+      | (va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60) >> 26U);
   abcd[0U] = v59;
   uint32_t va60 = abcd[3U];
   uint32_t vb61 = abcd[0U];
   uint32_t vc61 = abcd[1U];
   uint32_t vd61 = abcd[2U];
-  uint8_t *b61 = x + (uint32_t)44U;
+  uint8_t *b61 = x + 44U;
   uint32_t u60 = load32_le(b61);
   uint32_t xk60 = u60;
   uint32_t ti61 = _t[61U];
@@ -1050,14 +1042,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb61
     +
       ((va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61)
-      << (uint32_t)10U
-      | (va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61) >> (uint32_t)22U);
+      << 10U
+      | (va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61) >> 22U);
   abcd[3U] = v60;
   uint32_t va61 = abcd[2U];
   uint32_t vb62 = abcd[3U];
   uint32_t vc62 = abcd[0U];
   uint32_t vd62 = abcd[1U];
-  uint8_t *b62 = x + (uint32_t)8U;
+  uint8_t *b62 = x + 8U;
   uint32_t u61 = load32_le(b62);
   uint32_t xk61 = u61;
   uint32_t ti62 = _t[62U];
@@ -1066,14 +1058,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb62
     +
       ((va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62)
-      << (uint32_t)15U
-      | (va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62) >> (uint32_t)17U);
+      << 15U
+      | (va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62) >> 17U);
   abcd[2U] = v61;
   uint32_t va62 = abcd[1U];
   uint32_t vb = abcd[2U];
   uint32_t vc = abcd[3U];
   uint32_t vd = abcd[0U];
-  uint8_t *b63 = x + (uint32_t)36U;
+  uint8_t *b63 = x + 36U;
   uint32_t u62 = load32_le(b63);
   uint32_t xk62 = u62;
   uint32_t ti = _t[63U];
@@ -1082,8 +1074,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb
     +
       ((va62 + (vc ^ (vb | ~vd)) + xk62 + ti)
-      << (uint32_t)21U
-      | (va62 + (vc ^ (vb | ~vd)) + xk62 + ti) >> (uint32_t)11U);
+      << 21U
+      | (va62 + (vc ^ (vb | ~vd)) + xk62 + ti) >> 11U);
   abcd[1U] = v62;
   uint32_t a = abcd[0U];
   uint32_t b = abcd[1U];
@@ -1095,98 +1087,69 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
   abcd[3U] = d + dd;
 }
 
-static void legacy_pad(uint64_t len, uint8_t *dst)
+static void pad(uint64_t len, uint8_t *dst)
 {
   uint8_t *dst1 = dst;
-  dst1[0U] = (uint8_t)0x80U;
-  uint8_t *dst2 = dst + (uint32_t)1U;
-  for
-  (uint32_t
-    i = (uint32_t)0U;
-    i
-    < ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
-    i++)
+  dst1[0U] = 0x80U;
+  uint8_t *dst2 = dst + 1U;
+  for (uint32_t i = 0U; i < (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U; i++)
   {
-    dst2[i] = (uint8_t)0U;
+    dst2[i] = 0U;
   }
-  uint8_t
-  *dst3 =
-    dst
-    +
-      (uint32_t)1U
-      +
-        ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U)))
-        % (uint32_t)64U;
-  store64_le(dst3, len << (uint32_t)3U);
+  uint8_t *dst3 = dst + 1U + (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U;
+  store64_le(dst3, len << 3U);
 }
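
The padding laid out by `pad` is the standard MD5 scheme: a single 0x80 byte, then `(128U - (9U + len % 64U)) % 64U` zero bytes, then the bit length `len << 3U` stored as a little-endian 64-bit word. As a quick check of the zero-count formula: for `len % 64U == 36U` it gives `(128 - 45) % 64 = 19`, and `36 + 1 + 19 + 8 = 64`, exactly one block; for `len % 64U == 56U` it gives `63`, and `56 + 1 + 63 + 8 = 128`, two blocks.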
 
-void Hacl_Hash_Core_MD5_legacy_finish(uint32_t *s, uint8_t *dst)
+void Hacl_Hash_MD5_finish(uint32_t *s, uint8_t *dst)
 {
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store32_le(dst + i * (uint32_t)4U, s[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(dst + i * 4U, s[i]););
 }
 
-void Hacl_Hash_MD5_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
+void Hacl_Hash_MD5_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
 {
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
-    uint32_t sz = (uint32_t)64U;
+    uint32_t sz = 64U;
     uint8_t *block = blocks + sz * i;
-    legacy_update(s, block);
+    update(s, block);
   }
 }
 
 void
-Hacl_Hash_MD5_legacy_update_last(
-  uint32_t *s,
-  uint64_t prev_len,
-  uint8_t *input,
-  uint32_t input_len
-)
+Hacl_Hash_MD5_update_last(uint32_t *s, uint64_t prev_len, uint8_t *input, uint32_t input_len)
 {
-  uint32_t blocks_n = input_len / (uint32_t)64U;
-  uint32_t blocks_len = blocks_n * (uint32_t)64U;
+  uint32_t blocks_n = input_len / 64U;
+  uint32_t blocks_len = blocks_n * 64U;
   uint8_t *blocks = input;
   uint32_t rest_len = input_len - blocks_len;
   uint8_t *rest = input + blocks_len;
-  Hacl_Hash_MD5_legacy_update_multi(s, blocks, blocks_n);
+  Hacl_Hash_MD5_update_multi(s, blocks, blocks_n);
   uint64_t total_input_len = prev_len + (uint64_t)input_len;
-  uint32_t
-  pad_len =
-    (uint32_t)1U
-    +
-      ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U)))
-      % (uint32_t)64U
-    + (uint32_t)8U;
+  uint32_t pad_len = 1U + (128U - (9U + (uint32_t)(total_input_len % (uint64_t)64U))) % 64U + 8U;
   uint32_t tmp_len = rest_len + pad_len;
   uint8_t tmp_twoblocks[128U] = { 0U };
   uint8_t *tmp = tmp_twoblocks;
   uint8_t *tmp_rest = tmp;
   uint8_t *tmp_pad = tmp + rest_len;
   memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t));
-  legacy_pad(total_input_len, tmp_pad);
-  Hacl_Hash_MD5_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U);
+  pad(total_input_len, tmp_pad);
+  Hacl_Hash_MD5_update_multi(s, tmp, tmp_len / 64U);
 }
 
-void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_MD5_hash_oneshot(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
-  uint32_t
-  s[4U] =
-    { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U };
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t s[4U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U };
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -1195,75 +1158,75 @@ void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
   uint8_t *blocks = blocks0;
   uint32_t rest_len = rest_len0;
   uint8_t *rest = rest0;
-  Hacl_Hash_MD5_legacy_update_multi(s, blocks, blocks_n);
-  Hacl_Hash_MD5_legacy_update_last(s, (uint64_t)blocks_len, rest, rest_len);
-  Hacl_Hash_Core_MD5_legacy_finish(s, dst);
+  Hacl_Hash_MD5_update_multi(s, blocks, blocks_n);
+  Hacl_Hash_MD5_update_last(s, (uint64_t)blocks_len, rest, rest_len);
+  Hacl_Hash_MD5_finish(s, output);
 }
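
Note the rename from `Hacl_Hash_MD5_legacy_hash` to `Hacl_Hash_MD5_hash_oneshot`, with the output buffer moved to the first parameter. A minimal call under the new signature (wrapper and buffer names illustrative):

/* Illustrative only: one-shot MD5 of a caller-supplied buffer. */
static void md5_oneshot_example(uint8_t *msg, uint32_t msg_len, uint8_t digest[16U])
{
  Hacl_Hash_MD5_hash_oneshot(digest, msg, msg_len);   /* 16-byte digest, output first */
}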
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_create_in(void)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_malloc(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
-  Hacl_Hash_Core_MD5_legacy_init(block_state);
+  Hacl_Hash_MD5_init(block_state);
   return p;
 }
 
-void Hacl_Streaming_MD5_legacy_init(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_MD5_reset(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
-  Hacl_Hash_Core_MD5_legacy_init(block_state);
+  Hacl_Hash_MD5_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
+Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len)
 {
-  Hacl_Streaming_MD_state_32 s = *p;
+  Hacl_Streaming_MD_state_32 s = *state;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+  if ((uint64_t)chunk_len > 2305843009213693951ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (chunk_len <= 64U - sz)
   {
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
@@ -1273,74 +1236,74 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_MD5_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_Hash_MD5_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U);
+    uint32_t n_blocks = (chunk_len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    Hacl_Hash_MD5_update_multi(block_state1, data1, data1_len / 64U);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
+          .total_len = total_len1 + (uint64_t)chunk_len
         }
       );
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    uint32_t diff = 64U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state10 = s1.block_state;
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
     uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
@@ -1349,114 +1312,109 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
           .total_len = total_len2
         }
       );
-    Hacl_Streaming_MD_state_32 s10 = *p;
+    Hacl_Streaming_MD_state_32 s10 = *state;
     uint32_t *block_state1 = s10.block_state;
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_MD5_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
     if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_Hash_MD5_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U);
+    uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    Hacl_Hash_MD5_update_multi(block_state1, data1, data1_len / 64U);
     uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
         }
       );
   }
   return Hacl_Streaming_Types_Success;
 }
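
/* A minimal usage sketch of the renamed streaming MD5 API above
   (malloc -> update -> digest -> free), assuming the public header is named
   "Hacl_Hash_MD5.h"; everything else uses only the identifiers visible in
   this patch. */
#include <stdint.h>
#include "Hacl_Hash_MD5.h"

static int md5_of_two_chunks(uint8_t *a, uint32_t a_len,
                             uint8_t *b, uint32_t b_len,
                             uint8_t digest[16])
{
  Hacl_Streaming_MD_state_32 *st = Hacl_Hash_MD5_malloc();
  /* update returns Hacl_Streaming_Types_Success (0), or
     Hacl_Streaming_Types_MaximumLengthExceeded (1) once the total input would
     exceed 2305843009213693951 (2^61 - 1) bytes. */
  if (Hacl_Hash_MD5_update(st, a, a_len) != Hacl_Streaming_Types_Success
    || Hacl_Hash_MD5_update(st, b, b_len) != Hacl_Streaming_Types_Success)
  {
    Hacl_Hash_MD5_free(st);
    return 1;
  }
  Hacl_Hash_MD5_digest(st, digest); /* digest finishes on a copy; st stays usable */
  Hacl_Hash_MD5_free(st);
  return 0;
}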
 
-void Hacl_Streaming_MD5_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+void Hacl_Hash_MD5_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output)
 {
-  Hacl_Streaming_MD_state_32 scrut = *p;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[4U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)4U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 4U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_Hash_MD5_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U);
+  Hacl_Hash_MD5_update_multi(tmp_block_state, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
-  Hacl_Hash_MD5_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r);
-  Hacl_Hash_Core_MD5_legacy_finish(tmp_block_state, dst);
+  Hacl_Hash_MD5_update_last(tmp_block_state, prev_len_last, buf_last, r);
+  Hacl_Hash_MD5_finish(tmp_block_state, output);
 }
 
-void Hacl_Streaming_MD5_legacy_free(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_MD5_free(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
   KRML_HOST_FREE(block_state);
   KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
+  KRML_HOST_FREE(state);
 }
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_copy(Hacl_Streaming_MD_state_32 *s0)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_copy(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s0;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
-  memcpy(block_state, block_state0, (uint32_t)4U * sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  memcpy(buf, buf0, 64U * sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
+  memcpy(block_state, block_state0, 4U * sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_32
@@ -1465,8 +1423,8 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_copy(Hacl_Streaming_MD_sta
   return p;
 }
 
-void Hacl_Streaming_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_MD5_hash(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
-  Hacl_Hash_MD5_legacy_hash(input, input_len, dst);
+  Hacl_Hash_MD5_hash_oneshot(output, input, input_len);
 }
 
diff --git a/src/Hacl_Hash_SHA1.c b/src/Hacl_Hash_SHA1.c
index 5ecb3c0b..1a8b09b1 100644
--- a/src/Hacl_Hash_SHA1.c
+++ b/src/Hacl_Hash_SHA1.c
@@ -25,19 +25,14 @@
 
 #include "internal/Hacl_Hash_SHA1.h"
 
-static uint32_t
-_h0[5U] =
-  {
-    (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-    (uint32_t)0xc3d2e1f0U
-  };
+static uint32_t _h0[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
 
-void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s)
+void Hacl_Hash_SHA1_init(uint32_t *s)
 {
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, s[i] = _h0[i];);
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i] = _h0[i];);
 }
 
-static void legacy_update(uint32_t *h, uint8_t *l)
+static void update(uint32_t *h, uint8_t *l)
 {
   uint32_t ha = h[0U];
   uint32_t hb = h[1U];
@@ -45,29 +40,26 @@ static void legacy_update(uint32_t *h, uint8_t *l)
   uint32_t hd = h[3U];
   uint32_t he = h[4U];
   uint32_t _w[80U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+  for (uint32_t i = 0U; i < 80U; i++)
   {
     uint32_t v;
-    if (i < (uint32_t)16U)
+    if (i < 16U)
     {
-      uint8_t *b = l + i * (uint32_t)4U;
+      uint8_t *b = l + i * 4U;
       uint32_t u = load32_be(b);
       v = u;
     }
     else
     {
-      uint32_t wmit3 = _w[i - (uint32_t)3U];
-      uint32_t wmit8 = _w[i - (uint32_t)8U];
-      uint32_t wmit14 = _w[i - (uint32_t)14U];
-      uint32_t wmit16 = _w[i - (uint32_t)16U];
-      v =
-        (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16)))
-        << (uint32_t)1U
-        | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) >> (uint32_t)31U;
+      uint32_t wmit3 = _w[i - 3U];
+      uint32_t wmit8 = _w[i - 8U];
+      uint32_t wmit14 = _w[i - 14U];
+      uint32_t wmit16 = _w[i - 16U];
+      v = (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) << 1U | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) >> 31U;
     }
     _w[i] = v;
   }
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+  for (uint32_t i = 0U; i < 80U; i++)
   {
     uint32_t _a = h[0U];
     uint32_t _b = h[1U];
@@ -76,11 +68,11 @@ static void legacy_update(uint32_t *h, uint8_t *l)
     uint32_t _e = h[4U];
     uint32_t wmit = _w[i];
     uint32_t ite0;
-    if (i < (uint32_t)20U)
+    if (i < 20U)
     {
       ite0 = (_b & _c) ^ (~_b & _d);
     }
-    else if ((uint32_t)39U < i && i < (uint32_t)60U)
+    else if (39U < i && i < 60U)
     {
       ite0 = (_b & _c) ^ ((_b & _d) ^ (_c & _d));
     }
@@ -89,32 +81,32 @@ static void legacy_update(uint32_t *h, uint8_t *l)
       ite0 = _b ^ (_c ^ _d);
     }
     uint32_t ite;
-    if (i < (uint32_t)20U)
+    if (i < 20U)
     {
-      ite = (uint32_t)0x5a827999U;
+      ite = 0x5a827999U;
     }
-    else if (i < (uint32_t)40U)
+    else if (i < 40U)
     {
-      ite = (uint32_t)0x6ed9eba1U;
+      ite = 0x6ed9eba1U;
     }
-    else if (i < (uint32_t)60U)
+    else if (i < 60U)
     {
-      ite = (uint32_t)0x8f1bbcdcU;
+      ite = 0x8f1bbcdcU;
     }
     else
     {
-      ite = (uint32_t)0xca62c1d6U;
+      ite = 0xca62c1d6U;
     }
-    uint32_t _T = (_a << (uint32_t)5U | _a >> (uint32_t)27U) + ite0 + _e + ite + wmit;
+    uint32_t _T = (_a << 5U | _a >> 27U) + ite0 + _e + ite + wmit;
     h[0U] = _T;
     h[1U] = _a;
-    h[2U] = _b << (uint32_t)30U | _b >> (uint32_t)2U;
+    h[2U] = _b << 30U | _b >> 2U;
     h[3U] = _c;
     h[4U] = _d;
   }
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+  for (uint32_t i = 0U; i < 80U; i++)
   {
-    _w[i] = (uint32_t)0U;
+    _w[i] = 0U;
   }
   uint32_t sta = h[0U];
   uint32_t stb = h[1U];
@@ -128,101 +120,69 @@ static void legacy_update(uint32_t *h, uint8_t *l)
   h[4U] = ste + he;
 }
 
-static void legacy_pad(uint64_t len, uint8_t *dst)
+static void pad(uint64_t len, uint8_t *dst)
 {
   uint8_t *dst1 = dst;
-  dst1[0U] = (uint8_t)0x80U;
-  uint8_t *dst2 = dst + (uint32_t)1U;
-  for
-  (uint32_t
-    i = (uint32_t)0U;
-    i
-    < ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
-    i++)
+  dst1[0U] = 0x80U;
+  uint8_t *dst2 = dst + 1U;
+  for (uint32_t i = 0U; i < (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U; i++)
   {
-    dst2[i] = (uint8_t)0U;
+    dst2[i] = 0U;
   }
-  uint8_t
-  *dst3 =
-    dst
-    +
-      (uint32_t)1U
-      +
-        ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U)))
-        % (uint32_t)64U;
-  store64_be(dst3, len << (uint32_t)3U);
+  uint8_t *dst3 = dst + 1U + (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U;
+  store64_be(dst3, len << 3U);
 }
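
/* Worked example of the padding arithmetic used by pad() and by the pad_len
   computation in update_last: the pad is one 0x80 byte, a zero run of
   (128 - (9 + len % 64)) % 64 bytes, and an 8-byte big-endian bit length.
   For len % 64 == 55 the zero run is (128 - 64) % 64 = 0, so pad_len is
   1 + 0 + 8 = 9 and the message ends exactly on a block boundary (55 + 9 = 64).
   For len % 64 == 56 the zero run is 63, pad_len is 72, and the tail spills
   into a second block (56 + 72 = 128), which is why update_last stages the
   tail in a 128-byte tmp_twoblocks buffer. */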
 
-void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst)
+void Hacl_Hash_SHA1_finish(uint32_t *s, uint8_t *dst)
 {
-  KRML_MAYBE_FOR5(i,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
-    store32_be(dst + i * (uint32_t)4U, s[i]););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, store32_be(dst + i * 4U, s[i]););
 }
 
-void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
+void Hacl_Hash_SHA1_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
 {
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
-    uint32_t sz = (uint32_t)64U;
+    uint32_t sz = 64U;
     uint8_t *block = blocks + sz * i;
-    legacy_update(s, block);
+    update(s, block);
   }
 }
 
 void
-Hacl_Hash_SHA1_legacy_update_last(
-  uint32_t *s,
-  uint64_t prev_len,
-  uint8_t *input,
-  uint32_t input_len
-)
+Hacl_Hash_SHA1_update_last(uint32_t *s, uint64_t prev_len, uint8_t *input, uint32_t input_len)
 {
-  uint32_t blocks_n = input_len / (uint32_t)64U;
-  uint32_t blocks_len = blocks_n * (uint32_t)64U;
+  uint32_t blocks_n = input_len / 64U;
+  uint32_t blocks_len = blocks_n * 64U;
   uint8_t *blocks = input;
   uint32_t rest_len = input_len - blocks_len;
   uint8_t *rest = input + blocks_len;
-  Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n);
+  Hacl_Hash_SHA1_update_multi(s, blocks, blocks_n);
   uint64_t total_input_len = prev_len + (uint64_t)input_len;
-  uint32_t
-  pad_len =
-    (uint32_t)1U
-    +
-      ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U)))
-      % (uint32_t)64U
-    + (uint32_t)8U;
+  uint32_t pad_len = 1U + (128U - (9U + (uint32_t)(total_input_len % (uint64_t)64U))) % 64U + 8U;
   uint32_t tmp_len = rest_len + pad_len;
   uint8_t tmp_twoblocks[128U] = { 0U };
   uint8_t *tmp = tmp_twoblocks;
   uint8_t *tmp_rest = tmp;
   uint8_t *tmp_pad = tmp + rest_len;
   memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t));
-  legacy_pad(total_input_len, tmp_pad);
-  Hacl_Hash_SHA1_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U);
+  pad(total_input_len, tmp_pad);
+  Hacl_Hash_SHA1_update_multi(s, tmp, tmp_len / 64U);
 }
 
-void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA1_hash_oneshot(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
-  uint32_t
-  s[5U] =
-    {
-      (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-      (uint32_t)0xc3d2e1f0U
-    };
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -231,75 +191,75 @@ void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst
   uint8_t *blocks = blocks0;
   uint32_t rest_len = rest_len0;
   uint8_t *rest = rest0;
-  Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n);
-  Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)blocks_len, rest, rest_len);
-  Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
+  Hacl_Hash_SHA1_update_multi(s, blocks, blocks_n);
+  Hacl_Hash_SHA1_update_last(s, (uint64_t)blocks_len, rest, rest_len);
+  Hacl_Hash_SHA1_finish(s, output);
 }
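
/* A minimal sketch of the renamed one-shot entry point above.  Note the new
   argument order: the 20-byte output buffer comes first, then the input.  The
   public header name "Hacl_Hash_SHA1.h" is an assumption. */
#include <stdint.h>
#include "Hacl_Hash_SHA1.h"

static void sha1_abc(uint8_t digest[20])
{
  uint8_t msg[3U] = { 0x61U, 0x62U, 0x63U }; /* "abc" */
  Hacl_Hash_SHA1_hash_oneshot(digest, msg, 3U);
  /* digest now holds the 20-byte SHA-1("abc") test vector, a9993e36... */
}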
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_create_in(void)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_malloc(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
-  Hacl_Hash_Core_SHA1_legacy_init(block_state);
+  Hacl_Hash_SHA1_init(block_state);
   return p;
 }
 
-void Hacl_Streaming_SHA1_legacy_init(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA1_reset(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
-  Hacl_Hash_Core_SHA1_legacy_init(block_state);
+  Hacl_Hash_SHA1_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
+Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len)
 {
-  Hacl_Streaming_MD_state_32 s = *p;
+  Hacl_Streaming_MD_state_32 s = *state;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+  if ((uint64_t)chunk_len > 2305843009213693951ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (chunk_len <= 64U - sz)
   {
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
@@ -309,74 +269,74 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_SHA1_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U);
+    uint32_t n_blocks = (chunk_len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    Hacl_Hash_SHA1_update_multi(block_state1, data1, data1_len / 64U);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
+          .total_len = total_len1 + (uint64_t)chunk_len
         }
       );
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    uint32_t diff = 64U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state10 = s1.block_state;
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
     uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
@@ -385,114 +345,109 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
           .total_len = total_len2
         }
       );
-    Hacl_Streaming_MD_state_32 s10 = *p;
+    Hacl_Streaming_MD_state_32 s10 = *state;
     uint32_t *block_state1 = s10.block_state;
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_SHA1_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
     if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U);
+    uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    Hacl_Hash_SHA1_update_multi(block_state1, data1, data1_len / 64U);
     uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
         }
       );
   }
   return Hacl_Streaming_Types_Success;
 }
 
-void Hacl_Streaming_SHA1_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+void Hacl_Hash_SHA1_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output)
 {
-  Hacl_Streaming_MD_state_32 scrut = *p;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[5U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)5U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 5U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_Hash_SHA1_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U);
+  Hacl_Hash_SHA1_update_multi(tmp_block_state, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
-  Hacl_Hash_SHA1_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r);
-  Hacl_Hash_Core_SHA1_legacy_finish(tmp_block_state, dst);
+  Hacl_Hash_SHA1_update_last(tmp_block_state, prev_len_last, buf_last, r);
+  Hacl_Hash_SHA1_finish(tmp_block_state, output);
 }
 
-void Hacl_Streaming_SHA1_legacy_free(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA1_free(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
   KRML_HOST_FREE(block_state);
   KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
+  KRML_HOST_FREE(state);
 }
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_state_32 *s0)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_copy(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s0;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
-  memcpy(block_state, block_state0, (uint32_t)5U * sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  memcpy(buf, buf0, 64U * sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
+  memcpy(block_state, block_state0, 5U * sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_32
@@ -501,8 +456,8 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_st
   return p;
 }
 
-void Hacl_Streaming_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA1_hash(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
-  Hacl_Hash_SHA1_legacy_hash(input, input_len, dst);
+  Hacl_Hash_SHA1_hash_oneshot(output, input, input_len);
 }
 
diff --git a/src/Hacl_Hash_SHA2.c b/src/Hacl_Hash_SHA2.c
index c93c3616..995fe707 100644
--- a/src/Hacl_Hash_SHA2.c
+++ b/src/Hacl_Hash_SHA2.c
@@ -27,14 +27,14 @@
 
 #include "internal/Hacl_Krmllib.h"
 
-void Hacl_SHA2_Scalar32_sha256_init(uint32_t *hash)
+void Hacl_Hash_SHA2_sha256_init(uint32_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = hash;
-    uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
+    uint32_t x = Hacl_Hash_SHA2_h256[i];
     os[i] = x;);
 }
 
@@ -42,49 +42,49 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
 {
   uint32_t hash_old[8U] = { 0U };
   uint32_t ws[16U] = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(hash_old, hash, 8U * sizeof (uint32_t));
   uint8_t *b10 = b;
   uint32_t u = load32_be(b10);
   ws[0U] = u;
-  uint32_t u0 = load32_be(b10 + (uint32_t)4U);
+  uint32_t u0 = load32_be(b10 + 4U);
   ws[1U] = u0;
-  uint32_t u1 = load32_be(b10 + (uint32_t)8U);
+  uint32_t u1 = load32_be(b10 + 8U);
   ws[2U] = u1;
-  uint32_t u2 = load32_be(b10 + (uint32_t)12U);
+  uint32_t u2 = load32_be(b10 + 12U);
   ws[3U] = u2;
-  uint32_t u3 = load32_be(b10 + (uint32_t)16U);
+  uint32_t u3 = load32_be(b10 + 16U);
   ws[4U] = u3;
-  uint32_t u4 = load32_be(b10 + (uint32_t)20U);
+  uint32_t u4 = load32_be(b10 + 20U);
   ws[5U] = u4;
-  uint32_t u5 = load32_be(b10 + (uint32_t)24U);
+  uint32_t u5 = load32_be(b10 + 24U);
   ws[6U] = u5;
-  uint32_t u6 = load32_be(b10 + (uint32_t)28U);
+  uint32_t u6 = load32_be(b10 + 28U);
   ws[7U] = u6;
-  uint32_t u7 = load32_be(b10 + (uint32_t)32U);
+  uint32_t u7 = load32_be(b10 + 32U);
   ws[8U] = u7;
-  uint32_t u8 = load32_be(b10 + (uint32_t)36U);
+  uint32_t u8 = load32_be(b10 + 36U);
   ws[9U] = u8;
-  uint32_t u9 = load32_be(b10 + (uint32_t)40U);
+  uint32_t u9 = load32_be(b10 + 40U);
   ws[10U] = u9;
-  uint32_t u10 = load32_be(b10 + (uint32_t)44U);
+  uint32_t u10 = load32_be(b10 + 44U);
   ws[11U] = u10;
-  uint32_t u11 = load32_be(b10 + (uint32_t)48U);
+  uint32_t u11 = load32_be(b10 + 48U);
   ws[12U] = u11;
-  uint32_t u12 = load32_be(b10 + (uint32_t)52U);
+  uint32_t u12 = load32_be(b10 + 52U);
   ws[13U] = u12;
-  uint32_t u13 = load32_be(b10 + (uint32_t)56U);
+  uint32_t u13 = load32_be(b10 + 56U);
   ws[14U] = u13;
-  uint32_t u14 = load32_be(b10 + (uint32_t)60U);
+  uint32_t u14 = load32_be(b10 + 60U);
   ws[15U] = u14;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Hash_SHA2_k224_256[16U * i0 + i];
       uint32_t ws_t = ws[i];
       uint32_t a0 = hash[0U];
       uint32_t b0 = hash[1U];
@@ -98,20 +98,13 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
       uint32_t
       t1 =
         h02
-        +
-          ((e0 << (uint32_t)26U | e0 >> (uint32_t)6U)
-          ^
-            ((e0 << (uint32_t)21U | e0 >> (uint32_t)11U)
-            ^ (e0 << (uint32_t)7U | e0 >> (uint32_t)25U)))
+        + ((e0 << 26U | e0 >> 6U) ^ ((e0 << 21U | e0 >> 11U) ^ (e0 << 7U | e0 >> 25U)))
         + ((e0 & f0) ^ (~e0 & g0))
         + k_e_t
         + ws_t;
       uint32_t
       t2 =
-        ((a0 << (uint32_t)30U | a0 >> (uint32_t)2U)
-        ^
-          ((a0 << (uint32_t)19U | a0 >> (uint32_t)13U)
-          ^ (a0 << (uint32_t)10U | a0 >> (uint32_t)22U)))
+        ((a0 << 30U | a0 >> 2U) ^ ((a0 << 19U | a0 >> 13U) ^ (a0 << 10U | a0 >> 22U)))
         + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0)));
       uint32_t a1 = t1 + t2;
       uint32_t b1 = a0;
@@ -129,74 +122,63 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         uint32_t t16 = ws[i];
-        uint32_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        uint32_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        uint32_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
-        uint32_t
-        s1 =
-          (t2 << (uint32_t)15U | t2 >> (uint32_t)17U)
-          ^ ((t2 << (uint32_t)13U | t2 >> (uint32_t)19U) ^ t2 >> (uint32_t)10U);
-        uint32_t
-        s0 =
-          (t15 << (uint32_t)25U | t15 >> (uint32_t)7U)
-          ^ ((t15 << (uint32_t)14U | t15 >> (uint32_t)18U) ^ t15 >> (uint32_t)3U);
+        uint32_t t15 = ws[(i + 1U) % 16U];
+        uint32_t t7 = ws[(i + 9U) % 16U];
+        uint32_t t2 = ws[(i + 14U) % 16U];
+        uint32_t s1 = (t2 << 15U | t2 >> 17U) ^ ((t2 << 13U | t2 >> 19U) ^ t2 >> 10U);
+        uint32_t s0 = (t15 << 25U | t15 >> 7U) ^ ((t15 << 14U | t15 >> 18U) ^ t15 >> 3U);
         ws[i] = s1 + t7 + s0 + t16;);
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = hash;
     uint32_t x = hash[i] + hash_old[i];
     os[i] = x;);
 }
 
-void Hacl_SHA2_Scalar32_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
+void Hacl_Hash_SHA2_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b0 = b;
-    uint8_t *mb = b0 + i * (uint32_t)64U;
+    uint8_t *mb = b0 + i * 64U;
     sha256_update(mb, st);
   }
 }
 
 void
-Hacl_SHA2_Scalar32_sha256_update_last(
-  uint64_t totlen,
-  uint32_t len,
-  uint8_t *b,
-  uint32_t *hash
-)
+Hacl_Hash_SHA2_sha256_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *hash)
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[128U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b0 = b;
   memcpy(last, b0, len * sizeof (uint8_t));
-  last[len] = (uint8_t)0x80U;
-  memcpy(last + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last[len] = 0x80U;
+  memcpy(last + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)64U;
+  uint8_t *last10 = last + 64U;
   uint8_t *l0 = last00;
   uint8_t *l1 = last10;
   uint8_t *lb0 = l0;
@@ -204,65 +186,56 @@ Hacl_SHA2_Scalar32_sha256_update_last(
   uint8_t *last0 = lb0;
   uint8_t *last1 = lb1;
   sha256_update(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha256_update(last1, hash);
     return;
   }
 }
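
/* The blocks decision above mirrors the MD-style padding budget: the final
   data (len bytes), the 0x80 marker (1 byte) and the 64-bit length field
   (8 bytes) must fit in whole 64-byte blocks.  With len = 55, 55 + 8 + 1 = 64
   fits in one block; with len = 56 it does not, so two blocks (fin = 128) are
   used and the length field lands at the end of the second one. */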
 
-void Hacl_SHA2_Scalar32_sha256_finish(uint32_t *st, uint8_t *h)
+void Hacl_Hash_SHA2_sha256_finish(uint32_t *st, uint8_t *h)
 {
   uint8_t hbuf[32U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_be(hbuf + i * (uint32_t)4U, st[i]););
-  memcpy(h, hbuf, (uint32_t)32U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(hbuf + i * 4U, st[i]););
+  memcpy(h, hbuf, 32U * sizeof (uint8_t));
 }
 
-void Hacl_SHA2_Scalar32_sha224_init(uint32_t *hash)
+void Hacl_Hash_SHA2_sha224_init(uint32_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = hash;
-    uint32_t x = Hacl_Impl_SHA2_Generic_h224[i];
+    uint32_t x = Hacl_Hash_SHA2_h224[i];
     os[i] = x;);
 }
 
 static inline void sha224_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
 {
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(len, b, st);
+  Hacl_Hash_SHA2_sha256_update_nblocks(len, b, st);
 }
 
-void
-Hacl_SHA2_Scalar32_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st)
+void Hacl_Hash_SHA2_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st)
 {
-  Hacl_SHA2_Scalar32_sha256_update_last(totlen, len, b, st);
+  Hacl_Hash_SHA2_sha256_update_last(totlen, len, b, st);
 }
 
-void Hacl_SHA2_Scalar32_sha224_finish(uint32_t *st, uint8_t *h)
+void Hacl_Hash_SHA2_sha224_finish(uint32_t *st, uint8_t *h)
 {
   uint8_t hbuf[32U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_be(hbuf + i * (uint32_t)4U, st[i]););
-  memcpy(h, hbuf, (uint32_t)28U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(hbuf + i * 4U, st[i]););
+  memcpy(h, hbuf, 28U * sizeof (uint8_t));
 }
 
-void Hacl_SHA2_Scalar32_sha512_init(uint64_t *hash)
+void Hacl_Hash_SHA2_sha512_init(uint64_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = hash;
-    uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
+    uint64_t x = Hacl_Hash_SHA2_h512[i];
     os[i] = x;);
 }
 
@@ -270,49 +243,49 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
 {
   uint64_t hash_old[8U] = { 0U };
   uint64_t ws[16U] = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(hash_old, hash, 8U * sizeof (uint64_t));
   uint8_t *b10 = b;
   uint64_t u = load64_be(b10);
   ws[0U] = u;
-  uint64_t u0 = load64_be(b10 + (uint32_t)8U);
+  uint64_t u0 = load64_be(b10 + 8U);
   ws[1U] = u0;
-  uint64_t u1 = load64_be(b10 + (uint32_t)16U);
+  uint64_t u1 = load64_be(b10 + 16U);
   ws[2U] = u1;
-  uint64_t u2 = load64_be(b10 + (uint32_t)24U);
+  uint64_t u2 = load64_be(b10 + 24U);
   ws[3U] = u2;
-  uint64_t u3 = load64_be(b10 + (uint32_t)32U);
+  uint64_t u3 = load64_be(b10 + 32U);
   ws[4U] = u3;
-  uint64_t u4 = load64_be(b10 + (uint32_t)40U);
+  uint64_t u4 = load64_be(b10 + 40U);
   ws[5U] = u4;
-  uint64_t u5 = load64_be(b10 + (uint32_t)48U);
+  uint64_t u5 = load64_be(b10 + 48U);
   ws[6U] = u5;
-  uint64_t u6 = load64_be(b10 + (uint32_t)56U);
+  uint64_t u6 = load64_be(b10 + 56U);
   ws[7U] = u6;
-  uint64_t u7 = load64_be(b10 + (uint32_t)64U);
+  uint64_t u7 = load64_be(b10 + 64U);
   ws[8U] = u7;
-  uint64_t u8 = load64_be(b10 + (uint32_t)72U);
+  uint64_t u8 = load64_be(b10 + 72U);
   ws[9U] = u8;
-  uint64_t u9 = load64_be(b10 + (uint32_t)80U);
+  uint64_t u9 = load64_be(b10 + 80U);
   ws[10U] = u9;
-  uint64_t u10 = load64_be(b10 + (uint32_t)88U);
+  uint64_t u10 = load64_be(b10 + 88U);
   ws[11U] = u10;
-  uint64_t u11 = load64_be(b10 + (uint32_t)96U);
+  uint64_t u11 = load64_be(b10 + 96U);
   ws[12U] = u11;
-  uint64_t u12 = load64_be(b10 + (uint32_t)104U);
+  uint64_t u12 = load64_be(b10 + 104U);
   ws[13U] = u12;
-  uint64_t u13 = load64_be(b10 + (uint32_t)112U);
+  uint64_t u13 = load64_be(b10 + 112U);
   ws[14U] = u13;
-  uint64_t u14 = load64_be(b10 + (uint32_t)120U);
+  uint64_t u14 = load64_be(b10 + 120U);
   ws[15U] = u14;
   KRML_MAYBE_FOR5(i0,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
+    0U,
+    5U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint64_t k_t = Hacl_Hash_SHA2_k384_512[16U * i0 + i];
       uint64_t ws_t = ws[i];
       uint64_t a0 = hash[0U];
       uint64_t b0 = hash[1U];
@@ -326,20 +299,13 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
       uint64_t
       t1 =
         h02
-        +
-          ((e0 << (uint32_t)50U | e0 >> (uint32_t)14U)
-          ^
-            ((e0 << (uint32_t)46U | e0 >> (uint32_t)18U)
-            ^ (e0 << (uint32_t)23U | e0 >> (uint32_t)41U)))
+        + ((e0 << 50U | e0 >> 14U) ^ ((e0 << 46U | e0 >> 18U) ^ (e0 << 23U | e0 >> 41U)))
         + ((e0 & f0) ^ (~e0 & g0))
         + k_e_t
         + ws_t;
       uint64_t
       t2 =
-        ((a0 << (uint32_t)36U | a0 >> (uint32_t)28U)
-        ^
-          ((a0 << (uint32_t)30U | a0 >> (uint32_t)34U)
-          ^ (a0 << (uint32_t)25U | a0 >> (uint32_t)39U)))
+        ((a0 << 36U | a0 >> 28U) ^ ((a0 << 30U | a0 >> 34U) ^ (a0 << 25U | a0 >> 39U)))
         + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0)));
       uint64_t a1 = t1 + t2;
       uint64_t b1 = a0;
@@ -357,48 +323,42 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)4U)
+    if (i0 < 4U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         uint64_t t16 = ws[i];
-        uint64_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        uint64_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        uint64_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
-        uint64_t
-        s1 =
-          (t2 << (uint32_t)45U | t2 >> (uint32_t)19U)
-          ^ ((t2 << (uint32_t)3U | t2 >> (uint32_t)61U) ^ t2 >> (uint32_t)6U);
-        uint64_t
-        s0 =
-          (t15 << (uint32_t)63U | t15 >> (uint32_t)1U)
-          ^ ((t15 << (uint32_t)56U | t15 >> (uint32_t)8U) ^ t15 >> (uint32_t)7U);
+        uint64_t t15 = ws[(i + 1U) % 16U];
+        uint64_t t7 = ws[(i + 9U) % 16U];
+        uint64_t t2 = ws[(i + 14U) % 16U];
+        uint64_t s1 = (t2 << 45U | t2 >> 19U) ^ ((t2 << 3U | t2 >> 61U) ^ t2 >> 6U);
+        uint64_t s0 = (t15 << 63U | t15 >> 1U) ^ ((t15 << 56U | t15 >> 8U) ^ t15 >> 7U);
         ws[i] = s1 + t7 + s0 + t16;);
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = hash;
     uint64_t x = hash[i] + hash_old[i];
     os[i] = x;);
 }
 
-void Hacl_SHA2_Scalar32_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
+void Hacl_Hash_SHA2_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
 {
-  uint32_t blocks = len / (uint32_t)128U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 128U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b0 = b;
-    uint8_t *mb = b0 + i * (uint32_t)128U;
+    uint8_t *mb = b0 + i * 128U;
     sha512_update(mb, st);
   }
 }
 
 void
-Hacl_SHA2_Scalar32_sha512_update_last(
+Hacl_Hash_SHA2_sha512_update_last(
   FStar_UInt128_uint128 totlen,
   uint32_t len,
   uint8_t *b,
@@ -406,25 +366,25 @@ Hacl_SHA2_Scalar32_sha512_update_last(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+  if (len + 16U + 1U <= 128U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)128U;
+  uint32_t fin = blocks * 128U;
   uint8_t last[256U] = { 0U };
   uint8_t totlen_buf[16U] = { 0U };
-  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, 3U);
   store128_be(totlen_buf, total_len_bits);
   uint8_t *b0 = b;
   memcpy(last, b0, len * sizeof (uint8_t));
-  last[len] = (uint8_t)0x80U;
-  memcpy(last + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last[len] = 0x80U;
+  memcpy(last + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
+  uint8_t *last10 = last + 128U;
   uint8_t *l0 = last00;
   uint8_t *l1 = last10;
   uint8_t *lb0 = l0;
@@ -432,76 +392,68 @@ Hacl_SHA2_Scalar32_sha512_update_last(
   uint8_t *last0 = lb0;
   uint8_t *last1 = lb1;
   sha512_update(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha512_update(last1, hash);
     return;
   }
 }
 
-void Hacl_SHA2_Scalar32_sha512_finish(uint64_t *st, uint8_t *h)
+void Hacl_Hash_SHA2_sha512_finish(uint64_t *st, uint8_t *h)
 {
   uint8_t hbuf[64U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store64_be(hbuf + i * (uint32_t)8U, st[i]););
-  memcpy(h, hbuf, (uint32_t)64U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store64_be(hbuf + i * 8U, st[i]););
+  memcpy(h, hbuf, 64U * sizeof (uint8_t));
 }
 
-void Hacl_SHA2_Scalar32_sha384_init(uint64_t *hash)
+void Hacl_Hash_SHA2_sha384_init(uint64_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = hash;
-    uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
+    uint64_t x = Hacl_Hash_SHA2_h384[i];
     os[i] = x;);
 }
 
-void Hacl_SHA2_Scalar32_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
+void Hacl_Hash_SHA2_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
 {
-  Hacl_SHA2_Scalar32_sha512_update_nblocks(len, b, st);
+  Hacl_Hash_SHA2_sha512_update_nblocks(len, b, st);
 }
 
 void
-Hacl_SHA2_Scalar32_sha384_update_last(
+Hacl_Hash_SHA2_sha384_update_last(
   FStar_UInt128_uint128 totlen,
   uint32_t len,
   uint8_t *b,
   uint64_t *st
 )
 {
-  Hacl_SHA2_Scalar32_sha512_update_last(totlen, len, b, st);
+  Hacl_Hash_SHA2_sha512_update_last(totlen, len, b, st);
 }
 
-void Hacl_SHA2_Scalar32_sha384_finish(uint64_t *st, uint8_t *h)
+void Hacl_Hash_SHA2_sha384_finish(uint64_t *st, uint8_t *h)
 {
   uint8_t hbuf[64U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store64_be(hbuf + i * (uint32_t)8U, st[i]););
-  memcpy(h, hbuf, (uint32_t)48U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store64_be(hbuf + i * 8U, st[i]););
+  memcpy(h, hbuf, 48U * sizeof (uint8_t));
 }
 
 /**
 Allocate initial state for the SHA2_256 hash. The state is to be freed by
 calling `free_256`.
 */
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_256(void)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_256(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
-  Hacl_SHA2_Scalar32_sha256_init(block_state);
+  Hacl_Hash_SHA2_sha256_init(block_state);
   return p;
 }
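
/* Sketch of the renamed SHA2-256 streaming flow: allocate, absorb, extract the
   32-byte digest, release.  Only malloc_256 (above) and copy_256/reset_256
   (below) are visible in this hunk; Hacl_Hash_SHA2_update_256, _digest_256 and
   _free_256 are assumed to follow the same renaming, and the public header
   name "Hacl_Hash_SHA2.h" is likewise an assumption. */
#include <stdint.h>
#include "Hacl_Hash_SHA2.h"

static void sha256_streamed(uint8_t *data, uint32_t len, uint8_t out[32])
{
  Hacl_Streaming_MD_state_32 *st = Hacl_Hash_SHA2_malloc_256();
  (void)Hacl_Hash_SHA2_update_256(st, data, len); /* assumed name; 0 on success */
  Hacl_Hash_SHA2_digest_256(st, out);             /* assumed name; st stays valid */
  Hacl_Hash_SHA2_free_256(st);                    /* the "free_256" named in the doc comment */
}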
 
@@ -511,16 +463,16 @@ The state is to be freed by calling `free_256`. Cloning the state this way is
 useful, for instance, if your control-flow diverges and you need to feed
 more (different) data into the hash in each branch.
 */
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_MD_state_32 *s0)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_copy_256(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s0;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
-  memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  memcpy(buf, buf0, 64U * sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
+  memcpy(block_state, block_state0, 8U * sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_32
@@ -532,54 +484,54 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_MD_state
 /**
 Reset an existing state to the initial hash state with empty data.
 */
-void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA2_reset_256(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
-  Hacl_SHA2_Scalar32_sha256_init(block_state);
+  Hacl_Hash_SHA2_sha256_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
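
/* Sketch of the diverging control-flow pattern mentioned in the copy_256 doc
   comment above: clone the state so one branch can absorb extra data while the
   original is digested and reset for reuse.  Continues the includes of the
   previous sketch; Hacl_Hash_SHA2_update_256, _digest_256 and _free_256 remain
   assumed names. */
static void sha256_two_branches(Hacl_Streaming_MD_state_32 *st,
                                uint8_t *extra, uint32_t extra_len,
                                uint8_t out_prefix[32], uint8_t out_extended[32])
{
  Hacl_Streaming_MD_state_32 *branch = Hacl_Hash_SHA2_copy_256(st); /* deep copy of buf and block_state */
  (void)Hacl_Hash_SHA2_update_256(branch, extra, extra_len);
  Hacl_Hash_SHA2_digest_256(branch, out_extended); /* hash of everything fed to st, plus extra */
  Hacl_Hash_SHA2_free_256(branch);
  Hacl_Hash_SHA2_digest_256(st, out_prefix);       /* hash of the original data only */
  Hacl_Hash_SHA2_reset_256(st);                    /* st back to the empty initial state */
}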
 
 static inline Hacl_Streaming_Types_error_code
-update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
+update_224_256(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len)
 {
-  Hacl_Streaming_MD_state_32 s = *p;
+  Hacl_Streaming_MD_state_32 s = *state;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+  if ((uint64_t)chunk_len > 2305843009213693951ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (chunk_len <= 64U - sz)
   {
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
@@ -589,76 +541,74 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+      Hacl_Hash_SHA2_sha256_update_nblocks(64U, buf, block_state1);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / (uint32_t)64U * (uint32_t)64U,
-      data1,
-      block_state1);
+    uint32_t n_blocks = (chunk_len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    Hacl_Hash_SHA2_sha256_update_nblocks(data1_len / 64U * 64U, data1, block_state1);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
+          .total_len = total_len1 + (uint64_t)chunk_len
         }
       );
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    uint32_t diff = 64U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state10 = s1.block_state;
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
     uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
@@ -667,55 +617,48 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
           .total_len = total_len2
         }
       );
-    Hacl_Streaming_MD_state_32 s10 = *p;
+    Hacl_Streaming_MD_state_32 s10 = *state;
     uint32_t *block_state1 = s10.block_state;
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+      Hacl_Hash_SHA2_sha256_update_nblocks(64U, buf, block_state1);
     }
     uint32_t ite;
     if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / (uint32_t)64U * (uint32_t)64U,
-      data11,
-      block_state1);
+    uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    Hacl_Hash_SHA2_sha256_update_nblocks(data1_len / 64U * 64U, data1, block_state1);
     uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
         }
       );
   }
@@ -725,209 +668,203 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
 /**
 Feed an arbitrary amount of data into the hash. This function returns 0 for
 success, or 1 if the combined length of all of the data passed to `update_256`
-(since the last call to `init_256`) exceeds 2^61-1 bytes.
+(since the last call to `reset_256`) exceeds 2^61-1 bytes.
 
 This function is identical to the update function for SHA2_224.
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_256(
-  Hacl_Streaming_MD_state_32 *p,
+Hacl_Hash_SHA2_update_256(
+  Hacl_Streaming_MD_state_32 *state,
   uint8_t *input,
   uint32_t input_len
 )
 {
-  return update_224_256(p, input, input_len);
+  return update_224_256(state, input, input_len);
 }
 
 /**
-Write the resulting hash into `dst`, an array of 32 bytes. The state remains
-valid after a call to `finish_256`, meaning the user may feed more data into
-the hash via `update_256`. (The finish_256 function operates on an internal copy of
+Write the resulting hash into `output`, an array of 32 bytes. The state remains
+valid after a call to `digest_256`, meaning the user may feed more data into
+the hash via `update_256`. (The digest_256 function operates on an internal copy of
-the state and therefore does not invalidate the client-held state `p`.)
+the state and therefore does not invalidate the client-held state `state`.)
 */
-void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+void Hacl_Hash_SHA2_digest_256(Hacl_Streaming_MD_state_32 *state, uint8_t *output)
 {
-  Hacl_Streaming_MD_state_32 scrut = *p;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  Hacl_Hash_SHA2_sha256_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
-  Hacl_SHA2_Scalar32_sha256_update_last(prev_len_last + (uint64_t)r,
-    r,
-    buf_last,
-    tmp_block_state);
-  Hacl_SHA2_Scalar32_sha256_finish(tmp_block_state, dst);
+  Hacl_Hash_SHA2_sha256_update_last(prev_len_last + (uint64_t)r, r, buf_last, tmp_block_state);
+  Hacl_Hash_SHA2_sha256_finish(tmp_block_state, output);
 }
 
 /**
-Free a state allocated with `create_in_256`.
+Free a state allocated with `malloc_256`.
 
 This function is identical to the free function for SHA2_224.
 */
-void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA2_free_256(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
   KRML_HOST_FREE(block_state);
   KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
+  KRML_HOST_FREE(state);
 }
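
As a quick orientation for reviewers, here is a minimal sketch (not part of the patch) of how the renamed streaming SHA-256 entry points above would be driven from client code. The header name and the `Hacl_Hash_SHA2_malloc_256` allocator are assumptions, inferred from the `malloc_256` mentioned in the free_256 doc comment and from the malloc_224/malloc_512 definitions later in this hunk; `msg`/`msg_len` are illustrative caller variables.

#include <stdint.h>
#include "Hacl_Hash_SHA2.h"   /* header name assumed */

/* Allocate, reset, feed, digest, free -- the renamed streaming SHA-256 flow.
   Error handling mirrors the Hacl_Streaming_Types_error_code values used in
   this file; malloc_256 is assumed to follow the malloc_224/malloc_512 naming. */
static int sha256_stream_demo(uint8_t *msg, uint32_t msg_len, uint8_t digest[32])
{
  Hacl_Streaming_MD_state_32 *st = Hacl_Hash_SHA2_malloc_256();   /* assumed allocator */
  Hacl_Hash_SHA2_reset_256(st);                                   /* formerly init_256 */
  if (Hacl_Hash_SHA2_update_256(st, msg, msg_len) != Hacl_Streaming_Types_Success)
  {
    Hacl_Hash_SHA2_free_256(st);
    return 1;                          /* more than 2^61 - 1 bytes fed in total */
  }
  Hacl_Hash_SHA2_digest_256(st, digest);   /* state remains usable afterwards */
  Hacl_Hash_SHA2_free_256(st);
  return 0;
}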
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 32 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 32 bytes.
 */
-void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA2_hash_256(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
   uint8_t *ib = input;
-  uint8_t *rb = dst;
+  uint8_t *rb = output;
   uint32_t st[8U] = { 0U };
-  Hacl_SHA2_Scalar32_sha256_init(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  Hacl_Hash_SHA2_sha256_init(st);
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  Hacl_Hash_SHA2_sha256_update_nblocks(input_len, ib, st);
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
-  Hacl_SHA2_Scalar32_sha256_update_last(len_, rem, lb, st);
-  Hacl_SHA2_Scalar32_sha256_finish(st, rb);
+  Hacl_Hash_SHA2_sha256_update_last(len_, rem, lb, st);
+  Hacl_Hash_SHA2_sha256_finish(st, rb);
 }
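
Note that, besides the rename, the one-shot entry point now takes the output buffer first. A hedged call-site migration sketch (caller variable names are illustrative, not part of the patch):

#include <stdint.h>
#include "Hacl_Hash_SHA2.h"   /* header name assumed */

static void sha256_oneshot_demo(uint8_t *msg, uint32_t msg_len, uint8_t digest[32])
{
  /* before this patch: Hacl_Streaming_SHA2_hash_256(msg, msg_len, digest); */
  Hacl_Hash_SHA2_hash_256(digest, msg, msg_len);   /* output buffer now comes first */
}

The same output-first order applies to the hash_224, hash_384 and hash_512 one-shots changed below.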
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_224(void)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_224(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
-  Hacl_SHA2_Scalar32_sha224_init(block_state);
+  Hacl_Hash_SHA2_sha224_init(block_state);
   return p;
 }
 
-void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA2_reset_224(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
-  Hacl_SHA2_Scalar32_sha224_init(block_state);
+  Hacl_Hash_SHA2_sha224_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_224(
-  Hacl_Streaming_MD_state_32 *p,
+Hacl_Hash_SHA2_update_224(
+  Hacl_Streaming_MD_state_32 *state,
   uint8_t *input,
   uint32_t input_len
 )
 {
-  return update_224_256(p, input, input_len);
+  return update_224_256(state, input, input_len);
 }
 
 /**
-Write the resulting hash into `dst`, an array of 28 bytes. The state remains
-valid after a call to `finish_224`, meaning the user may feed more data into
+Write the resulting hash into `output`, an array of 28 bytes. The state remains
+valid after a call to `digest_224`, meaning the user may feed more data into
 the hash via `update_224`.
 */
-void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+void Hacl_Hash_SHA2_digest_224(Hacl_Streaming_MD_state_32 *state, uint8_t *output)
 {
-  Hacl_Streaming_MD_state_32 scrut = *p;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  sha224_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  sha224_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
-  Hacl_SHA2_Scalar32_sha224_update_last(prev_len_last + (uint64_t)r,
-    r,
-    buf_last,
-    tmp_block_state);
-  Hacl_SHA2_Scalar32_sha224_finish(tmp_block_state, dst);
+  Hacl_Hash_SHA2_sha224_update_last(prev_len_last + (uint64_t)r, r, buf_last, tmp_block_state);
+  Hacl_Hash_SHA2_sha224_finish(tmp_block_state, output);
 }
 
-void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_MD_state_32 *p)
+void Hacl_Hash_SHA2_free_224(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_SHA2_free_256(p);
+  Hacl_Hash_SHA2_free_256(state);
 }
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 28 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 28 bytes.
 */
-void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA2_hash_224(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
   uint8_t *ib = input;
-  uint8_t *rb = dst;
+  uint8_t *rb = output;
   uint32_t st[8U] = { 0U };
-  Hacl_SHA2_Scalar32_sha224_init(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  Hacl_Hash_SHA2_sha224_init(st);
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha224_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
-  Hacl_SHA2_Scalar32_sha224_update_last(len_, rem, lb, st);
-  Hacl_SHA2_Scalar32_sha224_finish(st, rb);
+  Hacl_Hash_SHA2_sha224_update_last(len_, rem, lb, st);
+  Hacl_Hash_SHA2_sha224_finish(st, rb);
 }
 
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_512(void)
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_512(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64
   *p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64));
   p[0U] = s;
-  Hacl_SHA2_Scalar32_sha512_init(block_state);
+  Hacl_Hash_SHA2_sha512_init(block_state);
   return p;
 }
 
@@ -937,16 +874,16 @@ The state is to be freed by calling `free_512`. Cloning the state this way is
 useful, for instance, if your control-flow diverges and you need to feed
 more (different) data into the hash in each branch.
 */
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_copy_512(Hacl_Streaming_MD_state_64 *s0)
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_copy_512(Hacl_Streaming_MD_state_64 *state)
 {
-  Hacl_Streaming_MD_state_64 scrut = *s0;
+  Hacl_Streaming_MD_state_64 scrut = *state;
   uint64_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)128U * sizeof (uint8_t));
-  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
-  memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  memcpy(buf, buf0, 128U * sizeof (uint8_t));
+  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
+  memcpy(block_state, block_state0, 8U * sizeof (uint64_t));
   Hacl_Streaming_MD_state_64
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_64
@@ -955,54 +892,54 @@ Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_copy_512(Hacl_Streaming_MD_state
   return p;
 }
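
The copy_512 doc comment above describes the diverging-control-flow use case; the sketch below (not part of the patch) shows it with the update_512/digest_512/free_512 functions that follow in this hunk. The header name is assumed, and error codes from update_512 are ignored for brevity.

#include <stdint.h>
#include "Hacl_Hash_SHA2.h"   /* header name assumed */

/* Hash a shared prefix once, clone the state, then hash two different suffixes. */
static void sha512_fork_demo(uint8_t *prefix, uint32_t prefix_len,
                             uint8_t *a, uint32_t a_len,
                             uint8_t *b, uint32_t b_len,
                             uint8_t out_a[64], uint8_t out_b[64])
{
  Hacl_Streaming_MD_state_64 *st  = Hacl_Hash_SHA2_malloc_512();
  Hacl_Hash_SHA2_update_512(st, prefix, prefix_len);              /* shared data */
  Hacl_Streaming_MD_state_64 *st2 = Hacl_Hash_SHA2_copy_512(st);  /* independent clone */
  Hacl_Hash_SHA2_update_512(st,  a, a_len);
  Hacl_Hash_SHA2_update_512(st2, b, b_len);
  Hacl_Hash_SHA2_digest_512(st,  out_a);
  Hacl_Hash_SHA2_digest_512(st2, out_b);
  Hacl_Hash_SHA2_free_512(st);
  Hacl_Hash_SHA2_free_512(st2);
}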
 
-void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_MD_state_64 *s)
+void Hacl_Hash_SHA2_reset_512(Hacl_Streaming_MD_state_64 *state)
 {
-  Hacl_Streaming_MD_state_64 scrut = *s;
+  Hacl_Streaming_MD_state_64 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint64_t *block_state = scrut.block_state;
-  Hacl_SHA2_Scalar32_sha512_init(block_state);
+  Hacl_Hash_SHA2_sha512_init(block_state);
   Hacl_Streaming_MD_state_64
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 static inline Hacl_Streaming_Types_error_code
-update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
+update_384_512(Hacl_Streaming_MD_state_64 *state, uint8_t *chunk, uint32_t chunk_len)
 {
-  Hacl_Streaming_MD_state_64 s = *p;
+  Hacl_Streaming_MD_state_64 s = *state;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)18446744073709551615U - total_len)
+  if ((uint64_t)chunk_len > 18446744073709551615ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)128U;
+    sz = 128U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    sz = (uint32_t)(total_len % (uint64_t)128U);
   }
-  if (len <= (uint32_t)128U - sz)
+  if (chunk_len <= 128U - sz)
   {
-    Hacl_Streaming_MD_state_64 s1 = *p;
+    Hacl_Streaming_MD_state_64 s1 = *state;
     uint64_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
     =
       (
         (Hacl_Streaming_MD_state_64){
@@ -1012,76 +949,74 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
-    Hacl_Streaming_MD_state_64 s1 = *p;
+    Hacl_Streaming_MD_state_64 s1 = *state;
     uint64_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, buf, block_state1);
+      Hacl_Hash_SHA2_sha512_update_nblocks(128U, buf, block_state1);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)chunk_len % (uint64_t)128U == 0ULL && (uint64_t)chunk_len > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / (uint32_t)128U * (uint32_t)128U,
-      data1,
-      block_state1);
+    uint32_t n_blocks = (chunk_len - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    Hacl_Hash_SHA2_sha512_update_nblocks(data1_len / 128U * 128U, data1, block_state1);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_64){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
+          .total_len = total_len1 + (uint64_t)chunk_len
         }
       );
   }
   else
   {
-    uint32_t diff = (uint32_t)128U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_MD_state_64 s1 = *p;
+    uint32_t diff = 128U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Streaming_MD_state_64 s1 = *state;
     uint64_t *block_state10 = s1.block_state;
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)128U;
+      sz10 = 128U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
     uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_64){
@@ -1090,55 +1025,48 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
           .total_len = total_len2
         }
       );
-    Hacl_Streaming_MD_state_64 s10 = *p;
+    Hacl_Streaming_MD_state_64 s10 = *state;
     uint64_t *block_state1 = s10.block_state;
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, buf, block_state1);
+      Hacl_Hash_SHA2_sha512_update_nblocks(128U, buf, block_state1);
     }
     uint32_t ite;
     if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)128U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    ((uint64_t)(chunk_len - diff) % (uint64_t)128U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / (uint32_t)128U * (uint32_t)128U,
-      data11,
-      block_state1);
+    uint32_t n_blocks = (chunk_len - diff - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    Hacl_Hash_SHA2_sha512_update_nblocks(data1_len / 128U * 128U, data1, block_state1);
     uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
     =
       (
         (Hacl_Streaming_MD_state_64){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
         }
       );
   }
@@ -1148,198 +1076,198 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
 /**
 Feed an arbitrary amount of data into the hash. This function returns 0 for
 success, or 1 if the combined length of all of the data passed to `update_512`
-(since the last call to `init_512`) exceeds 2^125-1 bytes.
+(since the last call to `reset_512`) exceeds 2^125-1 bytes.
 
 This function is identical to the update function for SHA2_384.
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_512(
-  Hacl_Streaming_MD_state_64 *p,
+Hacl_Hash_SHA2_update_512(
+  Hacl_Streaming_MD_state_64 *state,
   uint8_t *input,
   uint32_t input_len
 )
 {
-  return update_384_512(p, input, input_len);
+  return update_384_512(state, input, input_len);
 }
 
 /**
-Write the resulting hash into `dst`, an array of 64 bytes. The state remains
-valid after a call to `finish_512`, meaning the user may feed more data into
-the hash via `update_512`. (The finish_512 function operates on an internal copy of
+Write the resulting hash into `output`, an array of 64 bytes. The state remains
+valid after a call to `digest_512`, meaning the user may feed more data into
+the hash via `update_512`. (The digest_512 function operates on an internal copy of
-the state and therefore does not invalidate the client-held state `p`.)
+the state and therefore does not invalidate the client-held state `state`.)
 */
-void Hacl_Streaming_SHA2_finish_512(Hacl_Streaming_MD_state_64 *p, uint8_t *dst)
+void Hacl_Hash_SHA2_digest_512(Hacl_Streaming_MD_state_64 *state, uint8_t *output)
 {
-  Hacl_Streaming_MD_state_64 scrut = *p;
+  Hacl_Streaming_MD_state_64 scrut = *state;
   uint64_t *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)128U;
+    r = 128U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    r = (uint32_t)(total_len % (uint64_t)128U);
   }
   uint8_t *buf_1 = buf_;
   uint64_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 128U == 0U && r > 0U)
   {
-    ite = (uint32_t)128U;
+    ite = 128U;
   }
   else
   {
-    ite = r % (uint32_t)128U;
+    ite = r % 128U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  Hacl_Hash_SHA2_sha512_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
-  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
+  Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
       FStar_UInt128_uint64_to_uint128((uint64_t)r)),
     r,
     buf_last,
     tmp_block_state);
-  Hacl_SHA2_Scalar32_sha512_finish(tmp_block_state, dst);
+  Hacl_Hash_SHA2_sha512_finish(tmp_block_state, output);
 }
 
 /**
-Free a state allocated with `create_in_512`.
+Free a state allocated with `malloc_512`.
 
 This function is identical to the free function for SHA2_384.
 */
-void Hacl_Streaming_SHA2_free_512(Hacl_Streaming_MD_state_64 *s)
+void Hacl_Hash_SHA2_free_512(Hacl_Streaming_MD_state_64 *state)
 {
-  Hacl_Streaming_MD_state_64 scrut = *s;
+  Hacl_Streaming_MD_state_64 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint64_t *block_state = scrut.block_state;
   KRML_HOST_FREE(block_state);
   KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
+  KRML_HOST_FREE(state);
 }
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 64 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 64 bytes.
 */
-void Hacl_Streaming_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA2_hash_512(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
   uint8_t *ib = input;
-  uint8_t *rb = dst;
+  uint8_t *rb = output;
   uint64_t st[8U] = { 0U };
-  Hacl_SHA2_Scalar32_sha512_init(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  Hacl_Hash_SHA2_sha512_init(st);
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
-  Hacl_SHA2_Scalar32_sha512_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  Hacl_Hash_SHA2_sha512_update_nblocks(input_len, ib, st);
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
-  Hacl_SHA2_Scalar32_sha512_update_last(len_, rem, lb, st);
-  Hacl_SHA2_Scalar32_sha512_finish(st, rb);
+  Hacl_Hash_SHA2_sha512_update_last(len_, rem, lb, st);
+  Hacl_Hash_SHA2_sha512_finish(st, rb);
 }
 
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_384(void)
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_384(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64
   *p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64));
   p[0U] = s;
-  Hacl_SHA2_Scalar32_sha384_init(block_state);
+  Hacl_Hash_SHA2_sha384_init(block_state);
   return p;
 }
 
-void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_MD_state_64 *s)
+void Hacl_Hash_SHA2_reset_384(Hacl_Streaming_MD_state_64 *state)
 {
-  Hacl_Streaming_MD_state_64 scrut = *s;
+  Hacl_Streaming_MD_state_64 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint64_t *block_state = scrut.block_state;
-  Hacl_SHA2_Scalar32_sha384_init(block_state);
+  Hacl_Hash_SHA2_sha384_init(block_state);
   Hacl_Streaming_MD_state_64
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_384(
-  Hacl_Streaming_MD_state_64 *p,
+Hacl_Hash_SHA2_update_384(
+  Hacl_Streaming_MD_state_64 *state,
   uint8_t *input,
   uint32_t input_len
 )
 {
-  return update_384_512(p, input, input_len);
+  return update_384_512(state, input, input_len);
 }
 
 /**
-Write the resulting hash into `dst`, an array of 48 bytes. The state remains
-valid after a call to `finish_384`, meaning the user may feed more data into
+Write the resulting hash into `output`, an array of 48 bytes. The state remains
+valid after a call to `digest_384`, meaning the user may feed more data into
 the hash via `update_384`.
 */
-void Hacl_Streaming_SHA2_finish_384(Hacl_Streaming_MD_state_64 *p, uint8_t *dst)
+void Hacl_Hash_SHA2_digest_384(Hacl_Streaming_MD_state_64 *state, uint8_t *output)
 {
-  Hacl_Streaming_MD_state_64 scrut = *p;
+  Hacl_Streaming_MD_state_64 scrut = *state;
   uint64_t *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)128U;
+    r = 128U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    r = (uint32_t)(total_len % (uint64_t)128U);
   }
   uint8_t *buf_1 = buf_;
   uint64_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 128U == 0U && r > 0U)
   {
-    ite = (uint32_t)128U;
+    ite = 128U;
   }
   else
   {
-    ite = r % (uint32_t)128U;
+    ite = r % 128U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  Hacl_Hash_SHA2_sha384_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
-  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
+  Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
       FStar_UInt128_uint64_to_uint128((uint64_t)r)),
     r,
     buf_last,
     tmp_block_state);
-  Hacl_SHA2_Scalar32_sha384_finish(tmp_block_state, dst);
+  Hacl_Hash_SHA2_sha384_finish(tmp_block_state, output);
 }
 
-void Hacl_Streaming_SHA2_free_384(Hacl_Streaming_MD_state_64 *p)
+void Hacl_Hash_SHA2_free_384(Hacl_Streaming_MD_state_64 *state)
 {
-  Hacl_Streaming_SHA2_free_512(p);
+  Hacl_Hash_SHA2_free_512(state);
 }
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 48 bytes.
+Hash `input`, of len `input_len`, into `output`, an array of 48 bytes.
 */
-void Hacl_Streaming_SHA2_hash_384(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA2_hash_384(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
   uint8_t *ib = input;
-  uint8_t *rb = dst;
+  uint8_t *rb = output;
   uint64_t st[8U] = { 0U };
-  Hacl_SHA2_Scalar32_sha384_init(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  Hacl_Hash_SHA2_sha384_init(st);
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
-  Hacl_SHA2_Scalar32_sha384_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  Hacl_Hash_SHA2_sha384_update_nblocks(input_len, ib, st);
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
-  Hacl_SHA2_Scalar32_sha384_update_last(len_, rem, lb, st);
-  Hacl_SHA2_Scalar32_sha384_finish(st, rb);
+  Hacl_Hash_SHA2_sha384_update_last(len_, rem, lb, st);
+  Hacl_Hash_SHA2_sha384_finish(st, rb);
 }
 
diff --git a/src/Hacl_Hash_SHA3.c b/src/Hacl_Hash_SHA3.c
index 19d13b1b..1b821d07 100644
--- a/src/Hacl_Hash_SHA3.c
+++ b/src/Hacl_Hash_SHA3.c
@@ -31,27 +31,27 @@ static uint32_t block_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)144U;
+        return 144U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)104U;
+        return 104U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)72U;
+        return 72U;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        return (uint32_t)168U;
+        return 168U;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     default:
       {
@@ -67,19 +67,19 @@ static uint32_t hash_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     default:
       {
@@ -97,10 +97,10 @@ Hacl_Hash_SHA3_update_multi_sha3(
   uint32_t n_blocks
 )
 {
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
     uint8_t *block = blocks + i * block_len(a);
-    Hacl_Impl_SHA3_absorb_inner(block_len(a), block, s);
+    Hacl_Hash_SHA3_absorb_inner(block_len(a), block, s);
   }
 }
 
@@ -115,139 +115,139 @@ Hacl_Hash_SHA3_update_last_sha3(
   uint8_t suffix;
   if (a == Spec_Hash_Definitions_Shake128 || a == Spec_Hash_Definitions_Shake256)
   {
-    suffix = (uint8_t)0x1fU;
+    suffix = 0x1fU;
   }
   else
   {
-    suffix = (uint8_t)0x06U;
+    suffix = 0x06U;
   }
   uint32_t len = block_len(a);
   if (input_len == len)
   {
-    Hacl_Impl_SHA3_absorb_inner(len, input, s);
+    Hacl_Hash_SHA3_absorb_inner(len, input, s);
     uint8_t lastBlock_[200U] = { 0U };
     uint8_t *lastBlock = lastBlock_;
-    memcpy(lastBlock, input + input_len, (uint32_t)0U * sizeof (uint8_t));
+    memcpy(lastBlock, input + input_len, 0U * sizeof (uint8_t));
     lastBlock[0U] = suffix;
-    Hacl_Impl_SHA3_loadState(len, lastBlock, s);
-    if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && (uint32_t)0U == len - (uint32_t)1U)
+    Hacl_Hash_SHA3_loadState(len, lastBlock, s);
+    if (!(((uint32_t)suffix & 0x80U) == 0U) && 0U == len - 1U)
     {
-      Hacl_Impl_SHA3_state_permute(s);
+      Hacl_Hash_SHA3_state_permute(s);
     }
     uint8_t nextBlock_[200U] = { 0U };
     uint8_t *nextBlock = nextBlock_;
-    nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U;
-    Hacl_Impl_SHA3_loadState(len, nextBlock, s);
-    Hacl_Impl_SHA3_state_permute(s);
+    nextBlock[len - 1U] = 0x80U;
+    Hacl_Hash_SHA3_loadState(len, nextBlock, s);
+    Hacl_Hash_SHA3_state_permute(s);
     return;
   }
   uint8_t lastBlock_[200U] = { 0U };
   uint8_t *lastBlock = lastBlock_;
   memcpy(lastBlock, input, input_len * sizeof (uint8_t));
   lastBlock[input_len] = suffix;
-  Hacl_Impl_SHA3_loadState(len, lastBlock, s);
-  if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && input_len == len - (uint32_t)1U)
+  Hacl_Hash_SHA3_loadState(len, lastBlock, s);
+  if (!(((uint32_t)suffix & 0x80U) == 0U) && input_len == len - 1U)
   {
-    Hacl_Impl_SHA3_state_permute(s);
+    Hacl_Hash_SHA3_state_permute(s);
   }
   uint8_t nextBlock_[200U] = { 0U };
   uint8_t *nextBlock = nextBlock_;
-  nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U;
-  Hacl_Impl_SHA3_loadState(len, nextBlock, s);
-  Hacl_Impl_SHA3_state_permute(s);
+  nextBlock[len - 1U] = 0x80U;
+  Hacl_Hash_SHA3_loadState(len, nextBlock, s);
+  Hacl_Hash_SHA3_state_permute(s);
 }
 
 typedef struct hash_buf2_s
 {
-  Hacl_Streaming_Keccak_hash_buf fst;
-  Hacl_Streaming_Keccak_hash_buf snd;
+  Hacl_Hash_SHA3_hash_buf fst;
+  Hacl_Hash_SHA3_hash_buf snd;
 }
 hash_buf2;
 
-Spec_Hash_Definitions_hash_alg Hacl_Streaming_Keccak_get_alg(Hacl_Streaming_Keccak_state *s)
+Spec_Hash_Definitions_hash_alg Hacl_Hash_SHA3_get_alg(Hacl_Hash_SHA3_state_t *s)
 {
-  Hacl_Streaming_Keccak_hash_buf block_state = (*s).block_state;
+  Hacl_Hash_SHA3_hash_buf block_state = (*s).block_state;
   return block_state.fst;
 }
 
-Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_hash_alg a)
+Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_hash_alg a)
 {
   KRML_CHECK_SIZE(sizeof (uint8_t), block_len(a));
   uint8_t *buf0 = (uint8_t *)KRML_HOST_CALLOC(block_len(a), sizeof (uint8_t));
-  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
-  Hacl_Streaming_Keccak_hash_buf block_state = { .fst = a, .snd = buf };
-  Hacl_Streaming_Keccak_state
-  s = { .block_state = block_state, .buf = buf0, .total_len = (uint64_t)(uint32_t)0U };
-  Hacl_Streaming_Keccak_state
-  *p = (Hacl_Streaming_Keccak_state *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_Keccak_state));
+  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
+  Hacl_Hash_SHA3_hash_buf block_state = { .fst = a, .snd = buf };
+  Hacl_Hash_SHA3_state_t
+  s = { .block_state = block_state, .buf = buf0, .total_len = (uint64_t)0U };
+  Hacl_Hash_SHA3_state_t
+  *p = (Hacl_Hash_SHA3_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_SHA3_state_t));
   p[0U] = s;
   uint64_t *s1 = block_state.snd;
-  memset(s1, 0U, (uint32_t)25U * sizeof (uint64_t));
+  memset(s1, 0U, 25U * sizeof (uint64_t));
   return p;
 }
 
-void Hacl_Streaming_Keccak_free(Hacl_Streaming_Keccak_state *s)
+void Hacl_Hash_SHA3_free(Hacl_Hash_SHA3_state_t *state)
 {
-  Hacl_Streaming_Keccak_state scrut = *s;
+  Hacl_Hash_SHA3_state_t scrut = *state;
   uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state;
-  uint64_t *s1 = block_state.snd;
-  KRML_HOST_FREE(s1);
-  KRML_HOST_FREE(buf);
+  Hacl_Hash_SHA3_hash_buf block_state = scrut.block_state;
+  uint64_t *s = block_state.snd;
   KRML_HOST_FREE(s);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
 }
 
-Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_copy(Hacl_Streaming_Keccak_state *s0)
+Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_copy(Hacl_Hash_SHA3_state_t *state)
 {
-  Hacl_Streaming_Keccak_state scrut0 = *s0;
-  Hacl_Streaming_Keccak_hash_buf block_state0 = scrut0.block_state;
+  Hacl_Hash_SHA3_state_t scrut0 = *state;
+  Hacl_Hash_SHA3_hash_buf block_state0 = scrut0.block_state;
   uint8_t *buf0 = scrut0.buf;
   uint64_t total_len0 = scrut0.total_len;
   Spec_Hash_Definitions_hash_alg i = block_state0.fst;
   KRML_CHECK_SIZE(sizeof (uint8_t), block_len(i));
   uint8_t *buf1 = (uint8_t *)KRML_HOST_CALLOC(block_len(i), sizeof (uint8_t));
   memcpy(buf1, buf0, block_len(i) * sizeof (uint8_t));
-  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
-  Hacl_Streaming_Keccak_hash_buf block_state = { .fst = i, .snd = buf };
+  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
+  Hacl_Hash_SHA3_hash_buf block_state = { .fst = i, .snd = buf };
   hash_buf2 scrut = { .fst = block_state0, .snd = block_state };
   uint64_t *s_dst = scrut.snd.snd;
   uint64_t *s_src = scrut.fst.snd;
-  memcpy(s_dst, s_src, (uint32_t)25U * sizeof (uint64_t));
-  Hacl_Streaming_Keccak_state
+  memcpy(s_dst, s_src, 25U * sizeof (uint64_t));
+  Hacl_Hash_SHA3_state_t
   s = { .block_state = block_state, .buf = buf1, .total_len = total_len0 };
-  Hacl_Streaming_Keccak_state
-  *p = (Hacl_Streaming_Keccak_state *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_Keccak_state));
+  Hacl_Hash_SHA3_state_t
+  *p = (Hacl_Hash_SHA3_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_SHA3_state_t));
   p[0U] = s;
   return p;
 }
 
-void Hacl_Streaming_Keccak_reset(Hacl_Streaming_Keccak_state *s)
+void Hacl_Hash_SHA3_reset(Hacl_Hash_SHA3_state_t *state)
 {
-  Hacl_Streaming_Keccak_state scrut = *s;
+  Hacl_Hash_SHA3_state_t scrut = *state;
   uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state;
+  Hacl_Hash_SHA3_hash_buf block_state = scrut.block_state;
   Spec_Hash_Definitions_hash_alg i = block_state.fst;
-  KRML_HOST_IGNORE(i);
-  uint64_t *s1 = block_state.snd;
-  memset(s1, 0U, (uint32_t)25U * sizeof (uint64_t));
-  Hacl_Streaming_Keccak_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  KRML_MAYBE_UNUSED_VAR(i);
+  uint64_t *s = block_state.snd;
+  memset(s, 0U, 25U * sizeof (uint64_t));
+  Hacl_Hash_SHA3_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint32_t len)
+Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t chunk_len)
 {
-  Hacl_Streaming_Keccak_state s = *p;
-  Hacl_Streaming_Keccak_hash_buf block_state = s.block_state;
+  Hacl_Hash_SHA3_state_t s = *state;
+  Hacl_Hash_SHA3_hash_buf block_state = s.block_state;
   uint64_t total_len = s.total_len;
   Spec_Hash_Definitions_hash_alg i = block_state.fst;
-  if ((uint64_t)len > (uint64_t)0xFFFFFFFFFFFFFFFFU - total_len)
+  if ((uint64_t)chunk_len > 0xFFFFFFFFFFFFFFFFULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)block_len(i) == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)block_len(i) == 0ULL && total_len > 0ULL)
   {
     sz = block_len(i);
   }
@@ -255,14 +255,14 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
   {
     sz = (uint32_t)(total_len % (uint64_t)block_len(i));
   }
-  if (len <= block_len(i) - sz)
+  if (chunk_len <= block_len(i) - sz)
   {
-    Hacl_Streaming_Keccak_state s1 = *p;
-    Hacl_Streaming_Keccak_hash_buf block_state1 = s1.block_state;
+    Hacl_Hash_SHA3_state_t s1 = *state;
+    Hacl_Hash_SHA3_hash_buf block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
     {
       sz1 = block_len(i);
     }
@@ -271,26 +271,20 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
       sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
     }
     uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
     =
-      (
-        (Hacl_Streaming_Keccak_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2
-        }
-      );
+      ((Hacl_Hash_SHA3_state_t){ .block_state = block_state1, .buf = buf, .total_len = total_len2 });
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
-    Hacl_Streaming_Keccak_state s1 = *p;
-    Hacl_Streaming_Keccak_hash_buf block_state1 = s1.block_state;
+    Hacl_Hash_SHA3_state_t s1 = *state;
+    Hacl_Hash_SHA3_hash_buf block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
     {
       sz1 = block_len(i);
     }
@@ -298,52 +292,52 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     {
       sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
       uint64_t *s2 = block_state1.snd;
       Hacl_Hash_SHA3_update_multi_sha3(a1, s2, buf, block_len(i) / block_len(a1));
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)block_len(i) == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)chunk_len % (uint64_t)block_len(i) == 0ULL && (uint64_t)chunk_len > 0ULL)
     {
       ite = block_len(i);
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)block_len(i));
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)block_len(i));
     }
-    uint32_t n_blocks = (len - ite) / block_len(i);
+    uint32_t n_blocks = (chunk_len - ite) / block_len(i);
     uint32_t data1_len = n_blocks * block_len(i);
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
     Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
     uint64_t *s2 = block_state1.snd;
     Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data1, data1_len / block_len(a1));
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
+    *state
     =
       (
-        (Hacl_Streaming_Keccak_state){
+        (Hacl_Hash_SHA3_state_t){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
+          .total_len = total_len1 + (uint64_t)chunk_len
         }
       );
   }
   else
   {
     uint32_t diff = block_len(i) - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Keccak_state s1 = *p;
-    Hacl_Streaming_Keccak_hash_buf block_state10 = s1.block_state;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Hash_SHA3_state_t s1 = *state;
+    Hacl_Hash_SHA3_hash_buf block_state10 = s1.block_state;
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)block_len(i) == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)block_len(i) == 0ULL && total_len10 > 0ULL)
     {
       sz10 = block_len(i);
     }
@@ -352,23 +346,23 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
       sz10 = (uint32_t)(total_len10 % (uint64_t)block_len(i));
     }
     uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
     uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
+    *state
     =
       (
-        (Hacl_Streaming_Keccak_state){
+        (Hacl_Hash_SHA3_state_t){
           .block_state = block_state10,
           .buf = buf0,
           .total_len = total_len2
         }
       );
-    Hacl_Streaming_Keccak_state s10 = *p;
-    Hacl_Streaming_Keccak_hash_buf block_state1 = s10.block_state;
+    Hacl_Hash_SHA3_state_t s10 = *state;
+    Hacl_Hash_SHA3_hash_buf block_state1 = s10.block_state;
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
     {
       sz1 = block_len(i);
     }
@@ -376,7 +370,7 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     {
       sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
       uint64_t *s2 = block_state1.snd;
@@ -385,35 +379,35 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     uint32_t ite;
     if
     (
-      (uint64_t)(len - diff)
+      (uint64_t)(chunk_len - diff)
       % (uint64_t)block_len(i)
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
+      == 0ULL
+      && (uint64_t)(chunk_len - diff) > 0ULL
     )
     {
       ite = block_len(i);
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)block_len(i));
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)block_len(i));
     }
-    uint32_t n_blocks = (len - diff - ite) / block_len(i);
+    uint32_t n_blocks = (chunk_len - diff - ite) / block_len(i);
     uint32_t data1_len = n_blocks * block_len(i);
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
     Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
     uint64_t *s2 = block_state1.snd;
-    Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data11, data1_len / block_len(a1));
+    Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data1, data1_len / block_len(a1));
     uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
     =
       (
-        (Hacl_Streaming_Keccak_state){
+        (Hacl_Hash_SHA3_state_t){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
         }
       );
   }
@@ -421,19 +415,19 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
 }
 
 static void
-finish_(
+digest_(
   Spec_Hash_Definitions_hash_alg a,
-  Hacl_Streaming_Keccak_state *p,
-  uint8_t *dst,
+  Hacl_Hash_SHA3_state_t *state,
+  uint8_t *output,
   uint32_t l
 )
 {
-  Hacl_Streaming_Keccak_state scrut0 = *p;
-  Hacl_Streaming_Keccak_hash_buf block_state = scrut0.block_state;
+  Hacl_Hash_SHA3_state_t scrut0 = *state;
+  Hacl_Hash_SHA3_hash_buf block_state = scrut0.block_state;
   uint8_t *buf_ = scrut0.buf;
   uint64_t total_len = scrut0.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)block_len(a) == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)block_len(a) == 0ULL && total_len > 0ULL)
   {
     r = block_len(a);
   }
@@ -443,13 +437,13 @@ finish_(
   }
   uint8_t *buf_1 = buf_;
   uint64_t buf[25U] = { 0U };
-  Hacl_Streaming_Keccak_hash_buf tmp_block_state = { .fst = a, .snd = buf };
+  Hacl_Hash_SHA3_hash_buf tmp_block_state = { .fst = a, .snd = buf };
   hash_buf2 scrut = { .fst = block_state, .snd = tmp_block_state };
   uint64_t *s_dst = scrut.snd.snd;
   uint64_t *s_src = scrut.fst.snd;
-  memcpy(s_dst, s_src, (uint32_t)25U * sizeof (uint64_t));
+  memcpy(s_dst, s_src, 25U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % block_len(a) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(a) == 0U && r > 0U)
   {
     ite = block_len(a);
   }
@@ -461,7 +455,7 @@ finish_(
   uint8_t *buf_multi = buf_1;
   Spec_Hash_Definitions_hash_alg a1 = tmp_block_state.fst;
   uint64_t *s0 = tmp_block_state.snd;
-  Hacl_Hash_SHA3_update_multi_sha3(a1, s0, buf_multi, (uint32_t)0U / block_len(a1));
+  Hacl_Hash_SHA3_update_multi_sha3(a1, s0, buf_multi, 0U / block_len(a1));
   Spec_Hash_Definitions_hash_alg a10 = tmp_block_state.fst;
   uint64_t *s1 = tmp_block_state.snd;
   Hacl_Hash_SHA3_update_last_sha3(a10, s1, buf_last, r);
@@ -469,258 +463,182 @@ finish_(
   uint64_t *s = tmp_block_state.snd;
   if (a11 == Spec_Hash_Definitions_Shake128 || a11 == Spec_Hash_Definitions_Shake256)
   {
-    Hacl_Impl_SHA3_squeeze(s, block_len(a11), l, dst);
+    Hacl_Hash_SHA3_squeeze0(s, block_len(a11), l, output);
     return;
   }
-  Hacl_Impl_SHA3_squeeze(s, block_len(a11), hash_len(a11), dst);
+  Hacl_Hash_SHA3_squeeze0(s, block_len(a11), hash_len(a11), output);
 }
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_finish(Hacl_Streaming_Keccak_state *s, uint8_t *dst)
+Hacl_Hash_SHA3_digest(Hacl_Hash_SHA3_state_t *state, uint8_t *output)
 {
-  Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+  Spec_Hash_Definitions_hash_alg a1 = Hacl_Hash_SHA3_get_alg(state);
   if (a1 == Spec_Hash_Definitions_Shake128 || a1 == Spec_Hash_Definitions_Shake256)
   {
     return Hacl_Streaming_Types_InvalidAlgorithm;
   }
-  finish_(a1, s, dst, hash_len(a1));
+  digest_(a1, state, output, hash_len(a1));
   return Hacl_Streaming_Types_Success;
 }
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_squeeze(Hacl_Streaming_Keccak_state *s, uint8_t *dst, uint32_t l)
+Hacl_Hash_SHA3_squeeze(Hacl_Hash_SHA3_state_t *s, uint8_t *dst, uint32_t l)
 {
-  Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+  Spec_Hash_Definitions_hash_alg a1 = Hacl_Hash_SHA3_get_alg(s);
   if (!(a1 == Spec_Hash_Definitions_Shake128 || a1 == Spec_Hash_Definitions_Shake256))
   {
     return Hacl_Streaming_Types_InvalidAlgorithm;
   }
-  if (l == (uint32_t)0U)
+  if (l == 0U)
   {
     return Hacl_Streaming_Types_InvalidLength;
   }
-  finish_(a1, s, dst, l);
+  digest_(a1, s, dst, l);
   return Hacl_Streaming_Types_Success;
 }
 
-uint32_t Hacl_Streaming_Keccak_block_len(Hacl_Streaming_Keccak_state *s)
+uint32_t Hacl_Hash_SHA3_block_len(Hacl_Hash_SHA3_state_t *s)
 {
-  Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+  Spec_Hash_Definitions_hash_alg a1 = Hacl_Hash_SHA3_get_alg(s);
   return block_len(a1);
 }
 
-uint32_t Hacl_Streaming_Keccak_hash_len(Hacl_Streaming_Keccak_state *s)
+uint32_t Hacl_Hash_SHA3_hash_len(Hacl_Hash_SHA3_state_t *s)
 {
-  Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+  Spec_Hash_Definitions_hash_alg a1 = Hacl_Hash_SHA3_get_alg(s);
   return hash_len(a1);
 }
 
-bool Hacl_Streaming_Keccak_is_shake(Hacl_Streaming_Keccak_state *s)
+bool Hacl_Hash_SHA3_is_shake(Hacl_Hash_SHA3_state_t *s)
 {
-  Spec_Hash_Definitions_hash_alg uu____0 = Hacl_Streaming_Keccak_get_alg(s);
+  Spec_Hash_Definitions_hash_alg uu____0 = Hacl_Hash_SHA3_get_alg(s);
   return uu____0 == Spec_Hash_Definitions_Shake128 || uu____0 == Spec_Hash_Definitions_Shake256;
 }
 
 void
-Hacl_SHA3_shake128_hacl(
+Hacl_Hash_SHA3_shake128_hacl(
   uint32_t inputByteLen,
   uint8_t *input,
   uint32_t outputByteLen,
   uint8_t *output
 )
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1344U,
-    (uint32_t)256U,
-    inputByteLen,
-    input,
-    (uint8_t)0x1FU,
-    outputByteLen,
-    output);
+  Hacl_Hash_SHA3_keccak(1344U, 256U, inputByteLen, input, 0x1FU, outputByteLen, output);
 }
 
 void
-Hacl_SHA3_shake256_hacl(
+Hacl_Hash_SHA3_shake256_hacl(
   uint32_t inputByteLen,
   uint8_t *input,
   uint32_t outputByteLen,
   uint8_t *output
 )
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1088U,
-    (uint32_t)512U,
-    inputByteLen,
-    input,
-    (uint8_t)0x1FU,
-    outputByteLen,
-    output);
+  Hacl_Hash_SHA3_keccak(1088U, 512U, inputByteLen, input, 0x1FU, outputByteLen, output);
 }
 
-void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+void Hacl_Hash_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1152U,
-    (uint32_t)448U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)28U,
-    output);
+  Hacl_Hash_SHA3_keccak(1152U, 448U, inputByteLen, input, 0x06U, 28U, output);
 }
 
-void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+void Hacl_Hash_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1088U,
-    (uint32_t)512U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)32U,
-    output);
+  Hacl_Hash_SHA3_keccak(1088U, 512U, inputByteLen, input, 0x06U, 32U, output);
 }
 
-void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+void Hacl_Hash_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)832U,
-    (uint32_t)768U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)48U,
-    output);
+  Hacl_Hash_SHA3_keccak(832U, 768U, inputByteLen, input, 0x06U, 48U, output);
 }
 
-void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+void Hacl_Hash_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)576U,
-    (uint32_t)1024U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)64U,
-    output);
+  Hacl_Hash_SHA3_keccak(576U, 1024U, inputByteLen, input, 0x06U, 64U, output);
 }
 
 static const
 uint32_t
 keccak_rotc[24U] =
   {
-    (uint32_t)1U, (uint32_t)3U, (uint32_t)6U, (uint32_t)10U, (uint32_t)15U, (uint32_t)21U,
-    (uint32_t)28U, (uint32_t)36U, (uint32_t)45U, (uint32_t)55U, (uint32_t)2U, (uint32_t)14U,
-    (uint32_t)27U, (uint32_t)41U, (uint32_t)56U, (uint32_t)8U, (uint32_t)25U, (uint32_t)43U,
-    (uint32_t)62U, (uint32_t)18U, (uint32_t)39U, (uint32_t)61U, (uint32_t)20U, (uint32_t)44U
+    1U, 3U, 6U, 10U, 15U, 21U, 28U, 36U, 45U, 55U, 2U, 14U, 27U, 41U, 56U, 8U, 25U, 43U, 62U, 18U,
+    39U, 61U, 20U, 44U
   };
 
 static const
 uint32_t
 keccak_piln[24U] =
   {
-    (uint32_t)10U, (uint32_t)7U, (uint32_t)11U, (uint32_t)17U, (uint32_t)18U, (uint32_t)3U,
-    (uint32_t)5U, (uint32_t)16U, (uint32_t)8U, (uint32_t)21U, (uint32_t)24U, (uint32_t)4U,
-    (uint32_t)15U, (uint32_t)23U, (uint32_t)19U, (uint32_t)13U, (uint32_t)12U, (uint32_t)2U,
-    (uint32_t)20U, (uint32_t)14U, (uint32_t)22U, (uint32_t)9U, (uint32_t)6U, (uint32_t)1U
+    10U, 7U, 11U, 17U, 18U, 3U, 5U, 16U, 8U, 21U, 24U, 4U, 15U, 23U, 19U, 13U, 12U, 2U, 20U, 14U,
+    22U, 9U, 6U, 1U
   };
 
 static const
 uint64_t
 keccak_rndc[24U] =
   {
-    (uint64_t)0x0000000000000001U, (uint64_t)0x0000000000008082U, (uint64_t)0x800000000000808aU,
-    (uint64_t)0x8000000080008000U, (uint64_t)0x000000000000808bU, (uint64_t)0x0000000080000001U,
-    (uint64_t)0x8000000080008081U, (uint64_t)0x8000000000008009U, (uint64_t)0x000000000000008aU,
-    (uint64_t)0x0000000000000088U, (uint64_t)0x0000000080008009U, (uint64_t)0x000000008000000aU,
-    (uint64_t)0x000000008000808bU, (uint64_t)0x800000000000008bU, (uint64_t)0x8000000000008089U,
-    (uint64_t)0x8000000000008003U, (uint64_t)0x8000000000008002U, (uint64_t)0x8000000000000080U,
-    (uint64_t)0x000000000000800aU, (uint64_t)0x800000008000000aU, (uint64_t)0x8000000080008081U,
-    (uint64_t)0x8000000000008080U, (uint64_t)0x0000000080000001U, (uint64_t)0x8000000080008008U
+    0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, 0x8000000080008000ULL,
+    0x000000000000808bULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL,
+    0x000000000000008aULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
+    0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL,
+    0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800aULL, 0x800000008000000aULL,
+    0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
   };
 
-void Hacl_Impl_SHA3_state_permute(uint64_t *s)
+void Hacl_Hash_SHA3_state_permute(uint64_t *s)
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)24U; i0++)
+  for (uint32_t i0 = 0U; i0 < 24U; i0++)
   {
     uint64_t _C[5U] = { 0U };
     KRML_MAYBE_FOR5(i,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      _C[i] =
-        s[i
-        + (uint32_t)0U]
-        ^
-          (s[i
-          + (uint32_t)5U]
-          ^ (s[i + (uint32_t)10U] ^ (s[i + (uint32_t)15U] ^ s[i + (uint32_t)20U]))););
+      0U,
+      5U,
+      1U,
+      _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U]))););
     KRML_MAYBE_FOR5(i1,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      uint64_t uu____0 = _C[(i1 + (uint32_t)1U) % (uint32_t)5U];
-      uint64_t
-      _D =
-        _C[(i1 + (uint32_t)4U)
-        % (uint32_t)5U]
-        ^ (uu____0 << (uint32_t)1U | uu____0 >> (uint32_t)63U);
-      KRML_MAYBE_FOR5(i,
-        (uint32_t)0U,
-        (uint32_t)5U,
-        (uint32_t)1U,
-        s[i1 + (uint32_t)5U * i] = s[i1 + (uint32_t)5U * i] ^ _D;););
+      0U,
+      5U,
+      1U,
+      uint64_t uu____0 = _C[(i1 + 1U) % 5U];
+      uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U);
+      KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;););
     uint64_t x = s[1U];
     uint64_t current = x;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)24U; i++)
+    for (uint32_t i = 0U; i < 24U; i++)
     {
       uint32_t _Y = keccak_piln[i];
       uint32_t r = keccak_rotc[i];
       uint64_t temp = s[_Y];
       uint64_t uu____1 = current;
-      s[_Y] = uu____1 << r | uu____1 >> ((uint32_t)64U - r);
+      s[_Y] = uu____1 << r | uu____1 >> (64U - r);
       current = temp;
     }
     KRML_MAYBE_FOR5(i,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      uint64_t
-      v0 =
-        s[(uint32_t)0U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)1U + (uint32_t)5U * i] & s[(uint32_t)2U + (uint32_t)5U * i]);
-      uint64_t
-      v1 =
-        s[(uint32_t)1U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)2U + (uint32_t)5U * i] & s[(uint32_t)3U + (uint32_t)5U * i]);
-      uint64_t
-      v2 =
-        s[(uint32_t)2U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)3U + (uint32_t)5U * i] & s[(uint32_t)4U + (uint32_t)5U * i]);
-      uint64_t
-      v3 =
-        s[(uint32_t)3U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)4U + (uint32_t)5U * i] & s[(uint32_t)0U + (uint32_t)5U * i]);
-      uint64_t
-      v4 =
-        s[(uint32_t)4U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)0U + (uint32_t)5U * i] & s[(uint32_t)1U + (uint32_t)5U * i]);
-      s[(uint32_t)0U + (uint32_t)5U * i] = v0;
-      s[(uint32_t)1U + (uint32_t)5U * i] = v1;
-      s[(uint32_t)2U + (uint32_t)5U * i] = v2;
-      s[(uint32_t)3U + (uint32_t)5U * i] = v3;
-      s[(uint32_t)4U + (uint32_t)5U * i] = v4;);
+      0U,
+      5U,
+      1U,
+      uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]);
+      uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]);
+      uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]);
+      uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]);
+      uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]);
+      s[0U + 5U * i] = v0;
+      s[1U + 5U * i] = v1;
+      s[2U + 5U * i] = v2;
+      s[3U + 5U * i] = v3;
+      s[4U + 5U * i] = v4;);
     uint64_t c = keccak_rndc[i0];
     s[0U] = s[0U] ^ c;
   }
 }
 
-void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
+void Hacl_Hash_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
 {
   uint8_t block[200U] = { 0U };
   memcpy(block, input, rateInBytes * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+  for (uint32_t i = 0U; i < 25U; i++)
   {
-    uint64_t u = load64_le(block + i * (uint32_t)8U);
+    uint64_t u = load64_le(block + i * 8U);
     uint64_t x = u;
     s[i] = s[i] ^ x;
   }
@@ -729,18 +647,18 @@ void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
 static void storeState(uint32_t rateInBytes, uint64_t *s, uint8_t *res)
 {
   uint8_t block[200U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+  for (uint32_t i = 0U; i < 25U; i++)
   {
     uint64_t sj = s[i];
-    store64_le(block + i * (uint32_t)8U, sj);
+    store64_le(block + i * 8U, sj);
   }
   memcpy(res, block, rateInBytes * sizeof (uint8_t));
 }
 
-void Hacl_Impl_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s)
+void Hacl_Hash_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s)
 {
-  Hacl_Impl_SHA3_loadState(rateInBytes, block, s);
-  Hacl_Impl_SHA3_state_permute(s);
+  Hacl_Hash_SHA3_loadState(rateInBytes, block, s);
+  Hacl_Hash_SHA3_state_permute(s);
 }
 
 static void
@@ -754,30 +672,30 @@ absorb(
 {
   uint32_t n_blocks = inputByteLen / rateInBytes;
   uint32_t rem = inputByteLen % rateInBytes;
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
     uint8_t *block = input + i * rateInBytes;
-    Hacl_Impl_SHA3_absorb_inner(rateInBytes, block, s);
+    Hacl_Hash_SHA3_absorb_inner(rateInBytes, block, s);
   }
   uint8_t *last = input + n_blocks * rateInBytes;
   uint8_t lastBlock_[200U] = { 0U };
   uint8_t *lastBlock = lastBlock_;
   memcpy(lastBlock, last, rem * sizeof (uint8_t));
   lastBlock[rem] = delimitedSuffix;
-  Hacl_Impl_SHA3_loadState(rateInBytes, lastBlock, s);
-  if (!((delimitedSuffix & (uint8_t)0x80U) == (uint8_t)0U) && rem == rateInBytes - (uint32_t)1U)
+  Hacl_Hash_SHA3_loadState(rateInBytes, lastBlock, s);
+  if (!(((uint32_t)delimitedSuffix & 0x80U) == 0U) && rem == rateInBytes - 1U)
   {
-    Hacl_Impl_SHA3_state_permute(s);
+    Hacl_Hash_SHA3_state_permute(s);
   }
   uint8_t nextBlock_[200U] = { 0U };
   uint8_t *nextBlock = nextBlock_;
-  nextBlock[rateInBytes - (uint32_t)1U] = (uint8_t)0x80U;
-  Hacl_Impl_SHA3_loadState(rateInBytes, nextBlock, s);
-  Hacl_Impl_SHA3_state_permute(s);
+  nextBlock[rateInBytes - 1U] = 0x80U;
+  Hacl_Hash_SHA3_loadState(rateInBytes, nextBlock, s);
+  Hacl_Hash_SHA3_state_permute(s);
 }
 
 void
-Hacl_Impl_SHA3_squeeze(
+Hacl_Hash_SHA3_squeeze0(
   uint64_t *s,
   uint32_t rateInBytes,
   uint32_t outputByteLen,
@@ -788,16 +706,16 @@ Hacl_Impl_SHA3_squeeze(
   uint32_t remOut = outputByteLen % rateInBytes;
   uint8_t *last = output + outputByteLen - remOut;
   uint8_t *blocks = output;
-  for (uint32_t i = (uint32_t)0U; i < outBlocks; i++)
+  for (uint32_t i = 0U; i < outBlocks; i++)
   {
     storeState(rateInBytes, s, blocks + i * rateInBytes);
-    Hacl_Impl_SHA3_state_permute(s);
+    Hacl_Hash_SHA3_state_permute(s);
   }
   storeState(remOut, s, last);
 }
 
 void
-Hacl_Impl_SHA3_keccak(
+Hacl_Hash_SHA3_keccak(
   uint32_t rate,
   uint32_t capacity,
   uint32_t inputByteLen,
@@ -807,10 +725,10 @@ Hacl_Impl_SHA3_keccak(
   uint8_t *output
 )
 {
-  KRML_HOST_IGNORE(capacity);
-  uint32_t rateInBytes = rate / (uint32_t)8U;
+  KRML_MAYBE_UNUSED_VAR(capacity);
+  uint32_t rateInBytes = rate / 8U;
   uint64_t s[25U] = { 0U };
   absorb(s, rateInBytes, inputByteLen, input, delimitedSuffix);
-  Hacl_Impl_SHA3_squeeze(s, rateInBytes, outputByteLen, output);
+  Hacl_Hash_SHA3_squeeze0(s, rateInBytes, outputByteLen, output);
 }
 
diff --git a/src/Hacl_K256_ECDSA.c b/src/Hacl_K256_ECDSA.c
index 2ffc1060..bbd2c615 100644
--- a/src/Hacl_K256_ECDSA.c
+++ b/src/Hacl_K256_ECDSA.c
@@ -35,27 +35,27 @@ bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res)
 {
   uint64_t *a0 = a;
   uint64_t *res0 = res;
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < bLen / (uint32_t)4U; i++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i = 0U; i < bLen / 4U; i++)
   {
-    uint64_t t1 = a0[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res0 + (uint32_t)4U * i;
+    uint64_t t1 = a0[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res0 + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a0[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res0 + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a0[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res0 + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a0[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res0 + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a0[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res0 + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a0[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res0 + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a0[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res0 + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
-  for (uint32_t i = bLen / (uint32_t)4U * (uint32_t)4U; i < bLen; i++)
+  for (uint32_t i = bLen / 4U * 4U; i < bLen; i++)
   {
     uint64_t t1 = a0[i];
     uint64_t t2 = b[i];
@@ -68,26 +68,26 @@ bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res)
     uint64_t *a1 = a + bLen;
     uint64_t *res1 = res + bLen;
     uint64_t c = c00;
-    for (uint32_t i = (uint32_t)0U; i < (aLen - bLen) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (aLen - bLen) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, 0ULL, res_i);
     }
-    for (uint32_t i = (aLen - bLen) / (uint32_t)4U * (uint32_t)4U; i < aLen - bLen; i++)
+    for (uint32_t i = (aLen - bLen) / 4U * 4U; i < aLen - bLen; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c1 = c;
     return c1;
@@ -97,23 +97,23 @@ bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res)
 
 static uint64_t add4(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   return c;
@@ -121,52 +121,52 @@ static uint64_t add4(uint64_t *a, uint64_t *b, uint64_t *res)
 
 static void add_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -174,53 +174,53 @@ static void add_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 
 static void sub_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x;);
@@ -228,59 +228,59 @@ static void sub_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 
 static void mul4(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = b[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = a[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = a[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
     uint64_t r = c;
-    res[(uint32_t)4U + i0] = r;);
+    res[4U + i0] = r;);
 }
 
 static void sqr4(uint64_t *a, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = a;
     uint64_t a_j = a[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -288,30 +288,30 @@ static void sqr4(uint64_t *a, uint64_t *res)
     }
     uint64_t r = c;
     res[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline uint64_t is_qelem_zero(uint64_t *f)
 {
   uint64_t bn_zero[4U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(f[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
@@ -325,33 +325,33 @@ static inline bool is_qelem_zero_vartime(uint64_t *f)
   uint64_t f1 = f[1U];
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
-  return f0 == (uint64_t)0U && f1 == (uint64_t)0U && f2 == (uint64_t)0U && f3 == (uint64_t)0U;
+  return f0 == 0ULL && f1 == 0ULL && f2 == 0ULL && f3 == 0ULL;
 }
 
 static inline uint64_t load_qelem_check(uint64_t *f, uint8_t *b)
 {
   uint64_t n[4U] = { 0U };
-  n[0U] = (uint64_t)0xbfd25e8cd0364141U;
-  n[1U] = (uint64_t)0xbaaedce6af48a03bU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0xffffffffffffffffU;
+  n[0U] = 0xbfd25e8cd0364141ULL;
+  n[1U] = 0xbaaedce6af48a03bULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0xffffffffffffffffULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = f;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
   uint64_t is_zero = is_qelem_zero(f);
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(f[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(f[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t is_lt_q = acc;
   return ~is_zero & is_lt_q;
 }
@@ -359,11 +359,11 @@ static inline uint64_t load_qelem_check(uint64_t *f, uint8_t *b)
 static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b)
 {
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = f;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
   bool is_zero = is_qelem_zero_vartime(f);
@@ -372,29 +372,29 @@ static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b)
   uint64_t a2 = f[2U];
   uint64_t a3 = f[3U];
   bool is_lt_q_b;
-  if (a3 < (uint64_t)0xffffffffffffffffU)
+  if (a3 < 0xffffffffffffffffULL)
   {
     is_lt_q_b = true;
   }
-  else if (a2 < (uint64_t)0xfffffffffffffffeU)
+  else if (a2 < 0xfffffffffffffffeULL)
   {
     is_lt_q_b = true;
   }
-  else if (a2 > (uint64_t)0xfffffffffffffffeU)
+  else if (a2 > 0xfffffffffffffffeULL)
   {
     is_lt_q_b = false;
   }
-  else if (a1 < (uint64_t)0xbaaedce6af48a03bU)
+  else if (a1 < 0xbaaedce6af48a03bULL)
   {
     is_lt_q_b = true;
   }
-  else if (a1 > (uint64_t)0xbaaedce6af48a03bU)
+  else if (a1 > 0xbaaedce6af48a03bULL)
   {
     is_lt_q_b = false;
   }
   else
   {
-    is_lt_q_b = a0 < (uint64_t)0xbfd25e8cd0364141U;
+    is_lt_q_b = a0 < 0xbfd25e8cd0364141ULL;
   }
   return !is_zero && is_lt_q_b;
 }
@@ -402,16 +402,16 @@ static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b)
 static inline void modq_short(uint64_t *out, uint64_t *a)
 {
   uint64_t tmp[4U] = { 0U };
-  tmp[0U] = (uint64_t)0x402da1732fc9bebfU;
-  tmp[1U] = (uint64_t)0x4551231950b75fc4U;
-  tmp[2U] = (uint64_t)0x1U;
-  tmp[3U] = (uint64_t)0x0U;
+  tmp[0U] = 0x402da1732fc9bebfULL;
+  tmp[1U] = 0x4551231950b75fc4ULL;
+  tmp[2U] = 0x1ULL;
+  tmp[3U] = 0x0ULL;
   uint64_t c = add4(a, tmp, out);
-  uint64_t mask = (uint64_t)0U - c;
+  uint64_t mask = 0ULL - c;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = out;
     uint64_t x = (mask & out[i]) | (~mask & a[i]);
     os[i] = x;);
@@ -421,35 +421,31 @@ static inline void load_qelem_modq(uint64_t *f, uint8_t *b)
 {
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = f;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
-  memcpy(tmp, f, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, f, 4U * sizeof (uint64_t));
   modq_short(f, tmp);
 }
 
 static inline void store_qelem(uint8_t *b, uint64_t *f)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(b + i * (uint32_t)8U, f[(uint32_t)4U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(b + i * 8U, f[4U - i - 1U]););
 }
 
 static inline void qadd(uint64_t *out, uint64_t *f1, uint64_t *f2)
 {
   uint64_t n[4U] = { 0U };
-  n[0U] = (uint64_t)0xbfd25e8cd0364141U;
-  n[1U] = (uint64_t)0xbaaedce6af48a03bU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0xffffffffffffffffU;
+  n[0U] = 0xbfd25e8cd0364141ULL;
+  n[1U] = 0xbaaedce6af48a03bULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0xffffffffffffffffULL;
   add_mod4(n, f1, f2, out);
 }
 
@@ -463,33 +459,33 @@ mul_pow2_256_minus_q_add(
   uint64_t *res
 )
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + (uint32_t)2U);
-  uint64_t tmp[len + (uint32_t)2U];
-  memset(tmp, 0U, (len + (uint32_t)2U) * sizeof (uint64_t));
-  memset(tmp, 0U, (len + (uint32_t)2U) * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), len + 2U);
+  uint64_t tmp[len + 2U];
+  memset(tmp, 0U, (len + 2U) * sizeof (uint64_t));
+  memset(tmp, 0U, (len + 2U) * sizeof (uint64_t));
   KRML_MAYBE_FOR2(i0,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint64_t bj = t01[i0];
     uint64_t *res_j = tmp + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = a[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = a[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = a[i];
       uint64_t *res_i = res_j + i;
@@ -497,9 +493,9 @@ mul_pow2_256_minus_q_add(
     }
     uint64_t r = c;
     tmp[len + i0] = r;);
-  memcpy(res + (uint32_t)2U, a, len * sizeof (uint64_t));
-  KRML_HOST_IGNORE(bn_add(resLen, res, len + (uint32_t)2U, tmp, res));
-  uint64_t c = bn_add(resLen, res, (uint32_t)4U, e, res);
+  memcpy(res + 2U, a, len * sizeof (uint64_t));
+  bn_add(resLen, res, len + 2U, tmp, res);
+  uint64_t c = bn_add(resLen, res, 4U, e, res);
   return c;
 }
 
@@ -507,34 +503,23 @@ static inline void modq(uint64_t *out, uint64_t *a)
 {
   uint64_t r[4U] = { 0U };
   uint64_t tmp[4U] = { 0U };
-  tmp[0U] = (uint64_t)0x402da1732fc9bebfU;
-  tmp[1U] = (uint64_t)0x4551231950b75fc4U;
-  tmp[2U] = (uint64_t)0x1U;
-  tmp[3U] = (uint64_t)0x0U;
+  tmp[0U] = 0x402da1732fc9bebfULL;
+  tmp[1U] = 0x4551231950b75fc4ULL;
+  tmp[2U] = 0x1ULL;
+  tmp[3U] = 0x0ULL;
   uint64_t *t01 = tmp;
   uint64_t m[7U] = { 0U };
   uint64_t p[5U] = { 0U };
-  KRML_HOST_IGNORE(mul_pow2_256_minus_q_add((uint32_t)4U,
-      (uint32_t)7U,
-      t01,
-      a + (uint32_t)4U,
-      a,
-      m));
-  KRML_HOST_IGNORE(mul_pow2_256_minus_q_add((uint32_t)3U,
-      (uint32_t)5U,
-      t01,
-      m + (uint32_t)4U,
-      m,
-      p));
-  uint64_t
-  c2 = mul_pow2_256_minus_q_add((uint32_t)1U, (uint32_t)4U, t01, p + (uint32_t)4U, p, r);
+  mul_pow2_256_minus_q_add(4U, 7U, t01, a + 4U, a, m);
+  mul_pow2_256_minus_q_add(3U, 5U, t01, m + 4U, m, p);
+  uint64_t c2 = mul_pow2_256_minus_q_add(1U, 4U, t01, p + 4U, p, r);
   uint64_t c0 = c2;
   uint64_t c1 = add4(r, tmp, out);
-  uint64_t mask = (uint64_t)0U - (c0 + c1);
+  uint64_t mask = 0ULL - (c0 + c1);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = out;
     uint64_t x = (mask & out[i]) | (~mask & r[i]);
     os[i] = x;);
@@ -557,10 +542,10 @@ static inline void qsqr(uint64_t *out, uint64_t *f)
 static inline void qnegate_conditional_vartime(uint64_t *f, bool is_negate)
 {
   uint64_t n[4U] = { 0U };
-  n[0U] = (uint64_t)0xbfd25e8cd0364141U;
-  n[1U] = (uint64_t)0xbaaedce6af48a03bU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0xffffffffffffffffU;
+  n[0U] = 0xbfd25e8cd0364141ULL;
+  n[1U] = 0xbaaedce6af48a03bULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0xffffffffffffffffULL;
   uint64_t zero[4U] = { 0U };
   if (is_negate)
   {
@@ -574,31 +559,31 @@ static inline bool is_qelem_le_q_halved_vartime(uint64_t *f)
   uint64_t a1 = f[1U];
   uint64_t a2 = f[2U];
   uint64_t a3 = f[3U];
-  if (a3 < (uint64_t)0x7fffffffffffffffU)
+  if (a3 < 0x7fffffffffffffffULL)
   {
     return true;
   }
-  if (a3 > (uint64_t)0x7fffffffffffffffU)
+  if (a3 > 0x7fffffffffffffffULL)
   {
     return false;
   }
-  if (a2 < (uint64_t)0xffffffffffffffffU)
+  if (a2 < 0xffffffffffffffffULL)
   {
     return true;
   }
-  if (a2 > (uint64_t)0xffffffffffffffffU)
+  if (a2 > 0xffffffffffffffffULL)
   {
     return false;
   }
-  if (a1 < (uint64_t)0x5d576e7357a4501dU)
+  if (a1 < 0x5d576e7357a4501dULL)
   {
     return true;
   }
-  if (a1 > (uint64_t)0x5d576e7357a4501dU)
+  if (a1 > 0x5d576e7357a4501dULL)
   {
     return false;
   }
-  return a0 <= (uint64_t)0xdfe92f46681b20a0U;
+  return a0 <= 0xdfe92f46681b20a0ULL;
 }
 
 static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b)
@@ -606,27 +591,26 @@ static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b)
   uint64_t l[8U] = { 0U };
   mul4(a, b, l);
   uint64_t res_b_padded[4U] = { 0U };
-  memcpy(res_b_padded, l + (uint32_t)6U, (uint32_t)2U * sizeof (uint64_t));
-  uint64_t
-  c0 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, res_b_padded[0U], (uint64_t)1U, res);
-  uint64_t *a1 = res_b_padded + (uint32_t)1U;
-  uint64_t *res1 = res + (uint32_t)1U;
+  memcpy(res_b_padded, l + 6U, 2U * sizeof (uint64_t));
+  uint64_t c0 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, res_b_padded[0U], 1ULL, res);
+  uint64_t *a1 = res_b_padded + 1U;
+  uint64_t *res1 = res + 1U;
   uint64_t c = c0;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
+    0U,
+    3U,
+    1U,
     uint64_t t1 = a1[i];
     uint64_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i););
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t flag = l[5U] >> (uint32_t)63U;
-  uint64_t mask = (uint64_t)0U - flag;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t flag = l[5U] >> 63U;
+  uint64_t mask = 0ULL - flag;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (mask & res[i]) | (~mask & res_b_padded[i]);
     os[i] = x;);
@@ -634,7 +618,7 @@ static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b)
 
 static inline void qsquare_times_in_place(uint64_t *out, uint32_t b)
 {
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  for (uint32_t i = 0U; i < b; i++)
   {
     qsqr(out, out);
   }
@@ -642,8 +626,8 @@ static inline void qsquare_times_in_place(uint64_t *out, uint32_t b)
 
 static inline void qsquare_times(uint64_t *out, uint64_t *a, uint32_t b)
 {
-  memcpy(out, a, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  memcpy(out, a, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < b; i++)
   {
     qsqr(out, out);
   }
@@ -658,7 +642,7 @@ static inline void qinv(uint64_t *out, uint64_t *f)
   uint64_t x_1001[4U] = { 0U };
   uint64_t x_1011[4U] = { 0U };
   uint64_t x_1101[4U] = { 0U };
-  qsquare_times(x_10, f, (uint32_t)1U);
+  qsquare_times(x_10, f, 1U);
   qmul(x_11, x_10, f);
   qmul(x_101, x_10, x_11);
   qmul(x_111, x_10, x_101);
@@ -668,89 +652,89 @@ static inline void qinv(uint64_t *out, uint64_t *f)
   uint64_t x6[4U] = { 0U };
   uint64_t x8[4U] = { 0U };
   uint64_t x14[4U] = { 0U };
-  qsquare_times(x6, x_1101, (uint32_t)2U);
+  qsquare_times(x6, x_1101, 2U);
   qmul(x6, x6, x_1011);
-  qsquare_times(x8, x6, (uint32_t)2U);
+  qsquare_times(x8, x6, 2U);
   qmul(x8, x8, x_11);
-  qsquare_times(x14, x8, (uint32_t)6U);
+  qsquare_times(x14, x8, 6U);
   qmul(x14, x14, x6);
   uint64_t x56[4U] = { 0U };
-  qsquare_times(out, x14, (uint32_t)14U);
+  qsquare_times(out, x14, 14U);
   qmul(out, out, x14);
-  qsquare_times(x56, out, (uint32_t)28U);
+  qsquare_times(x56, out, 28U);
   qmul(x56, x56, out);
-  qsquare_times(out, x56, (uint32_t)56U);
+  qsquare_times(out, x56, 56U);
   qmul(out, out, x56);
-  qsquare_times_in_place(out, (uint32_t)14U);
+  qsquare_times_in_place(out, 14U);
   qmul(out, out, x14);
-  qsquare_times_in_place(out, (uint32_t)3U);
+  qsquare_times_in_place(out, 3U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_1011);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_1011);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)3U);
+  qsquare_times_in_place(out, 3U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_1001);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)10U);
+  qsquare_times_in_place(out, 10U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)9U);
+  qsquare_times_in_place(out, 9U);
   qmul(out, out, x8);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_1001);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_1011);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_11);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)10U);
+  qsquare_times_in_place(out, 10U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_1001);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, f);
-  qsquare_times_in_place(out, (uint32_t)8U);
+  qsquare_times_in_place(out, 8U);
   qmul(out, out, x6);
 }
 
 void Hacl_Impl_K256_Point_make_point_at_inf(uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
-  memset(px, 0U, (uint32_t)5U * sizeof (uint64_t));
-  memset(py, 0U, (uint32_t)5U * sizeof (uint64_t));
-  py[0U] = (uint64_t)1U;
-  memset(pz, 0U, (uint32_t)5U * sizeof (uint64_t));
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
+  memset(px, 0U, 5U * sizeof (uint64_t));
+  memset(py, 0U, 5U * sizeof (uint64_t));
+  py[0U] = 1ULL;
+  memset(pz, 0U, 5U * sizeof (uint64_t));
 }
 
 static inline void to_aff_point(uint64_t *p_aff, uint64_t *p)
 {
   uint64_t *x = p_aff;
-  uint64_t *y = p_aff + (uint32_t)5U;
+  uint64_t *y = p_aff + 5U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y1 = p + 5U;
+  uint64_t *z1 = p + 10U;
   uint64_t zinv[5U] = { 0U };
   Hacl_Impl_K256_Finv_finv(zinv, z1);
   Hacl_K256_Field_fmul(x, x1, zinv);
@@ -762,7 +746,7 @@ static inline void to_aff_point(uint64_t *p_aff, uint64_t *p)
 static inline void to_aff_point_x(uint64_t *x, uint64_t *p)
 {
   uint64_t *x1 = p;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *z1 = p + 10U;
   uint64_t zinv[5U] = { 0U };
   Hacl_Impl_K256_Finv_finv(zinv, z1);
   Hacl_K256_Field_fmul(x, x1, zinv);
@@ -773,13 +757,13 @@ static inline bool is_on_curve_vartime(uint64_t *p)
 {
   uint64_t y2_exp[5U] = { 0U };
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)5U;
+  uint64_t *y = p + 5U;
   uint64_t b[5U] = { 0U };
-  b[0U] = (uint64_t)0x7U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 0x7ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
   Hacl_K256_Field_fsqr(y2_exp, x);
   Hacl_K256_Field_fmul(y2_exp, y2_exp, x);
   Hacl_K256_Field_fadd(y2_exp, y2_exp, b);
@@ -795,11 +779,11 @@ static inline bool is_on_curve_vartime(uint64_t *p)
 void Hacl_Impl_K256_Point_point_negate(uint64_t *out, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
   uint64_t *ox = out;
-  uint64_t *oy = out + (uint32_t)5U;
-  uint64_t *oz = out + (uint32_t)10U;
+  uint64_t *oy = out + 5U;
+  uint64_t *oz = out + 10U;
   ox[0U] = px[0U];
   ox[1U] = px[1U];
   ox[2U] = px[2U];
@@ -815,11 +799,11 @@ void Hacl_Impl_K256_Point_point_negate(uint64_t *out, uint64_t *p)
   uint64_t a2 = py[2U];
   uint64_t a3 = py[3U];
   uint64_t a4 = py[4U];
-  uint64_t r0 = (uint64_t)18014381329608892U - a0;
-  uint64_t r1 = (uint64_t)18014398509481980U - a1;
-  uint64_t r2 = (uint64_t)18014398509481980U - a2;
-  uint64_t r3 = (uint64_t)18014398509481980U - a3;
-  uint64_t r4 = (uint64_t)1125899906842620U - a4;
+  uint64_t r0 = 18014381329608892ULL - a0;
+  uint64_t r1 = 18014398509481980ULL - a1;
+  uint64_t r2 = 18014398509481980ULL - a2;
+  uint64_t r3 = 18014398509481980ULL - a3;
+  uint64_t r4 = 1125899906842620ULL - a4;
   uint64_t f0 = r0;
   uint64_t f1 = r1;
   uint64_t f2 = r2;
@@ -845,9 +829,9 @@ static inline void point_negate_conditional_vartime(uint64_t *p, bool is_negate)
 static inline void aff_point_store(uint8_t *out, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
+  uint64_t *py = p + 5U;
   Hacl_K256_Field_store_felem(out, px);
-  Hacl_K256_Field_store_felem(out + (uint32_t)32U, py);
+  Hacl_K256_Field_store_felem(out + 32U, py);
 }
 
 void Hacl_Impl_K256_Point_point_store(uint8_t *out, uint64_t *p)
@@ -860,9 +844,9 @@ void Hacl_Impl_K256_Point_point_store(uint8_t *out, uint64_t *p)
 bool Hacl_Impl_K256_Point_aff_point_load_vartime(uint64_t *p, uint8_t *b)
 {
   uint8_t *px = b;
-  uint8_t *py = b + (uint32_t)32U;
+  uint8_t *py = b + 32U;
   uint64_t *bn_px = p;
-  uint64_t *bn_py = p + (uint32_t)5U;
+  uint64_t *bn_py = p + 5U;
   bool is_x_valid = Hacl_K256_Field_load_felem_lt_prime_vartime(bn_px, px);
   bool is_y_valid = Hacl_K256_Field_load_felem_lt_prime_vartime(bn_py, py);
   if (is_x_valid && is_y_valid)
@@ -879,14 +863,14 @@ static inline bool load_point_vartime(uint64_t *p, uint8_t *b)
   if (res)
   {
     uint64_t *x = p_aff;
-    uint64_t *y = p_aff + (uint32_t)5U;
+    uint64_t *y = p_aff + 5U;
     uint64_t *x1 = p;
-    uint64_t *y1 = p + (uint32_t)5U;
-    uint64_t *z1 = p + (uint32_t)10U;
-    memcpy(x1, x, (uint32_t)5U * sizeof (uint64_t));
-    memcpy(y1, y, (uint32_t)5U * sizeof (uint64_t));
-    memset(z1, 0U, (uint32_t)5U * sizeof (uint64_t));
-    z1[0U] = (uint64_t)1U;
+    uint64_t *y1 = p + 5U;
+    uint64_t *z1 = p + 10U;
+    memcpy(x1, x, 5U * sizeof (uint64_t));
+    memcpy(y1, y, 5U * sizeof (uint64_t));
+    memset(z1, 0U, 5U * sizeof (uint64_t));
+    z1[0U] = 1ULL;
   }
   return res;
 }
@@ -895,24 +879,24 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
 {
   uint8_t s0 = s[0U];
   uint8_t s01 = s0;
-  if (!(s01 == (uint8_t)0x02U || s01 == (uint8_t)0x03U))
+  if (!(s01 == 0x02U || s01 == 0x03U))
   {
     return false;
   }
-  uint8_t *xb = s + (uint32_t)1U;
+  uint8_t *xb = s + 1U;
   bool is_x_valid = Hacl_K256_Field_load_felem_lt_prime_vartime(x, xb);
-  bool is_y_odd = s01 == (uint8_t)0x03U;
+  bool is_y_odd = s01 == 0x03U;
   if (!is_x_valid)
   {
     return false;
   }
   uint64_t y2[5U] = { 0U };
   uint64_t b[5U] = { 0U };
-  b[0U] = (uint64_t)0x7U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 0x7ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
   Hacl_K256_Field_fsqr(y2, x);
   Hacl_K256_Field_fmul(y2, y2, x);
   Hacl_K256_Field_fadd(y2, y2, b);
@@ -930,7 +914,7 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
     return false;
   }
   uint64_t x0 = y[0U];
-  bool is_y_odd1 = (x0 & (uint64_t)1U) == (uint64_t)1U;
+  bool is_y_odd1 = (x0 & 1ULL) == 1ULL;
   Hacl_K256_Field_fnegate_conditional_vartime(y, is_y_odd1 != is_y_odd);
   return true;
 }
@@ -939,33 +923,33 @@ void Hacl_Impl_K256_PointDouble_point_double(uint64_t *out, uint64_t *p)
 {
   uint64_t tmp[25U] = { 0U };
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y1 = p + 5U;
+  uint64_t *z1 = p + 10U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
   uint64_t *yy = tmp;
-  uint64_t *zz = tmp + (uint32_t)5U;
-  uint64_t *bzz3 = tmp + (uint32_t)10U;
-  uint64_t *bzz9 = tmp + (uint32_t)15U;
-  uint64_t *tmp1 = tmp + (uint32_t)20U;
+  uint64_t *zz = tmp + 5U;
+  uint64_t *bzz3 = tmp + 10U;
+  uint64_t *bzz9 = tmp + 15U;
+  uint64_t *tmp1 = tmp + 20U;
   Hacl_K256_Field_fsqr(yy, y1);
   Hacl_K256_Field_fsqr(zz, z1);
-  Hacl_K256_Field_fmul_small_num(x3, x1, (uint64_t)2U);
+  Hacl_K256_Field_fmul_small_num(x3, x1, 2ULL);
   Hacl_K256_Field_fmul(x3, x3, y1);
   Hacl_K256_Field_fmul(tmp1, yy, y1);
   Hacl_K256_Field_fmul(z3, tmp1, z1);
-  Hacl_K256_Field_fmul_small_num(z3, z3, (uint64_t)8U);
+  Hacl_K256_Field_fmul_small_num(z3, z3, 8ULL);
   Hacl_K256_Field_fnormalize_weak(z3, z3);
-  Hacl_K256_Field_fmul_small_num(bzz3, zz, (uint64_t)21U);
+  Hacl_K256_Field_fmul_small_num(bzz3, zz, 21ULL);
   Hacl_K256_Field_fnormalize_weak(bzz3, bzz3);
-  Hacl_K256_Field_fmul_small_num(bzz9, bzz3, (uint64_t)3U);
-  Hacl_K256_Field_fsub(bzz9, yy, bzz9, (uint64_t)6U);
+  Hacl_K256_Field_fmul_small_num(bzz9, bzz3, 3ULL);
+  Hacl_K256_Field_fsub(bzz9, yy, bzz9, 6ULL);
   Hacl_K256_Field_fadd(tmp1, yy, bzz3);
   Hacl_K256_Field_fmul(tmp1, bzz9, tmp1);
   Hacl_K256_Field_fmul(y3, yy, zz);
   Hacl_K256_Field_fmul(x3, x3, bzz9);
-  Hacl_K256_Field_fmul_small_num(y3, y3, (uint64_t)168U);
+  Hacl_K256_Field_fmul_small_num(y3, y3, 168ULL);
   Hacl_K256_Field_fadd(y3, tmp1, y3);
   Hacl_K256_Field_fnormalize_weak(y3, y3);
 }
@@ -974,23 +958,23 @@ void Hacl_Impl_K256_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q)
 {
   uint64_t tmp[45U] = { 0U };
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y1 = p + 5U;
+  uint64_t *z1 = p + 10U;
   uint64_t *x2 = q;
-  uint64_t *y2 = q + (uint32_t)5U;
-  uint64_t *z2 = q + (uint32_t)10U;
+  uint64_t *y2 = q + 5U;
+  uint64_t *z2 = q + 10U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
   uint64_t *xx = tmp;
-  uint64_t *yy = tmp + (uint32_t)5U;
-  uint64_t *zz = tmp + (uint32_t)10U;
-  uint64_t *xy_pairs = tmp + (uint32_t)15U;
-  uint64_t *yz_pairs = tmp + (uint32_t)20U;
-  uint64_t *xz_pairs = tmp + (uint32_t)25U;
-  uint64_t *yy_m_bzz3 = tmp + (uint32_t)30U;
-  uint64_t *yy_p_bzz3 = tmp + (uint32_t)35U;
-  uint64_t *tmp1 = tmp + (uint32_t)40U;
+  uint64_t *yy = tmp + 5U;
+  uint64_t *zz = tmp + 10U;
+  uint64_t *xy_pairs = tmp + 15U;
+  uint64_t *yz_pairs = tmp + 20U;
+  uint64_t *xz_pairs = tmp + 25U;
+  uint64_t *yy_m_bzz3 = tmp + 30U;
+  uint64_t *yy_p_bzz3 = tmp + 35U;
+  uint64_t *tmp1 = tmp + 40U;
   Hacl_K256_Field_fmul(xx, x1, x2);
   Hacl_K256_Field_fmul(yy, y1, y2);
   Hacl_K256_Field_fmul(zz, z1, z2);
@@ -998,29 +982,29 @@ void Hacl_Impl_K256_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q)
   Hacl_K256_Field_fadd(tmp1, x2, y2);
   Hacl_K256_Field_fmul(xy_pairs, xy_pairs, tmp1);
   Hacl_K256_Field_fadd(tmp1, xx, yy);
-  Hacl_K256_Field_fsub(xy_pairs, xy_pairs, tmp1, (uint64_t)4U);
+  Hacl_K256_Field_fsub(xy_pairs, xy_pairs, tmp1, 4ULL);
   Hacl_K256_Field_fadd(yz_pairs, y1, z1);
   Hacl_K256_Field_fadd(tmp1, y2, z2);
   Hacl_K256_Field_fmul(yz_pairs, yz_pairs, tmp1);
   Hacl_K256_Field_fadd(tmp1, yy, zz);
-  Hacl_K256_Field_fsub(yz_pairs, yz_pairs, tmp1, (uint64_t)4U);
+  Hacl_K256_Field_fsub(yz_pairs, yz_pairs, tmp1, 4ULL);
   Hacl_K256_Field_fadd(xz_pairs, x1, z1);
   Hacl_K256_Field_fadd(tmp1, x2, z2);
   Hacl_K256_Field_fmul(xz_pairs, xz_pairs, tmp1);
   Hacl_K256_Field_fadd(tmp1, xx, zz);
-  Hacl_K256_Field_fsub(xz_pairs, xz_pairs, tmp1, (uint64_t)4U);
-  Hacl_K256_Field_fmul_small_num(tmp1, zz, (uint64_t)21U);
+  Hacl_K256_Field_fsub(xz_pairs, xz_pairs, tmp1, 4ULL);
+  Hacl_K256_Field_fmul_small_num(tmp1, zz, 21ULL);
   Hacl_K256_Field_fnormalize_weak(tmp1, tmp1);
-  Hacl_K256_Field_fsub(yy_m_bzz3, yy, tmp1, (uint64_t)2U);
+  Hacl_K256_Field_fsub(yy_m_bzz3, yy, tmp1, 2ULL);
   Hacl_K256_Field_fadd(yy_p_bzz3, yy, tmp1);
-  Hacl_K256_Field_fmul_small_num(x3, yz_pairs, (uint64_t)21U);
+  Hacl_K256_Field_fmul_small_num(x3, yz_pairs, 21ULL);
   Hacl_K256_Field_fnormalize_weak(x3, x3);
-  Hacl_K256_Field_fmul_small_num(z3, xx, (uint64_t)3U);
-  Hacl_K256_Field_fmul_small_num(y3, z3, (uint64_t)21U);
+  Hacl_K256_Field_fmul_small_num(z3, xx, 3ULL);
+  Hacl_K256_Field_fmul_small_num(y3, z3, 21ULL);
   Hacl_K256_Field_fnormalize_weak(y3, y3);
   Hacl_K256_Field_fmul(tmp1, xy_pairs, yy_m_bzz3);
   Hacl_K256_Field_fmul(x3, x3, xz_pairs);
-  Hacl_K256_Field_fsub(x3, tmp1, x3, (uint64_t)2U);
+  Hacl_K256_Field_fsub(x3, tmp1, x3, 2ULL);
   Hacl_K256_Field_fnormalize_weak(x3, x3);
   Hacl_K256_Field_fmul(tmp1, yy_p_bzz3, yy_m_bzz3);
   Hacl_K256_Field_fmul(y3, y3, xz_pairs);
@@ -1036,30 +1020,30 @@ static inline void scalar_split_lambda(uint64_t *r1, uint64_t *r2, uint64_t *k)
 {
   uint64_t tmp1[4U] = { 0U };
   uint64_t tmp2[4U] = { 0U };
-  tmp1[0U] = (uint64_t)0xe893209a45dbb031U;
-  tmp1[1U] = (uint64_t)0x3daa8a1471e8ca7fU;
-  tmp1[2U] = (uint64_t)0xe86c90e49284eb15U;
-  tmp1[3U] = (uint64_t)0x3086d221a7d46bcdU;
-  tmp2[0U] = (uint64_t)0x1571b4ae8ac47f71U;
-  tmp2[1U] = (uint64_t)0x221208ac9df506c6U;
-  tmp2[2U] = (uint64_t)0x6f547fa90abfe4c4U;
-  tmp2[3U] = (uint64_t)0xe4437ed6010e8828U;
+  tmp1[0U] = 0xe893209a45dbb031ULL;
+  tmp1[1U] = 0x3daa8a1471e8ca7fULL;
+  tmp1[2U] = 0xe86c90e49284eb15ULL;
+  tmp1[3U] = 0x3086d221a7d46bcdULL;
+  tmp2[0U] = 0x1571b4ae8ac47f71ULL;
+  tmp2[1U] = 0x221208ac9df506c6ULL;
+  tmp2[2U] = 0x6f547fa90abfe4c4ULL;
+  tmp2[3U] = 0xe4437ed6010e8828ULL;
   qmul_shift_384(r1, k, tmp1);
   qmul_shift_384(r2, k, tmp2);
-  tmp1[0U] = (uint64_t)0x6f547fa90abfe4c3U;
-  tmp1[1U] = (uint64_t)0xe4437ed6010e8828U;
-  tmp1[2U] = (uint64_t)0x0U;
-  tmp1[3U] = (uint64_t)0x0U;
-  tmp2[0U] = (uint64_t)0xd765cda83db1562cU;
-  tmp2[1U] = (uint64_t)0x8a280ac50774346dU;
-  tmp2[2U] = (uint64_t)0xfffffffffffffffeU;
-  tmp2[3U] = (uint64_t)0xffffffffffffffffU;
+  tmp1[0U] = 0x6f547fa90abfe4c3ULL;
+  tmp1[1U] = 0xe4437ed6010e8828ULL;
+  tmp1[2U] = 0x0ULL;
+  tmp1[3U] = 0x0ULL;
+  tmp2[0U] = 0xd765cda83db1562cULL;
+  tmp2[1U] = 0x8a280ac50774346dULL;
+  tmp2[2U] = 0xfffffffffffffffeULL;
+  tmp2[3U] = 0xffffffffffffffffULL;
   qmul(r1, r1, tmp1);
   qmul(r2, r2, tmp2);
-  tmp1[0U] = (uint64_t)0xe0cfc810b51283cfU;
-  tmp1[1U] = (uint64_t)0xa880b9fc8ec739c2U;
-  tmp1[2U] = (uint64_t)0x5ad9e3fd77ed9ba4U;
-  tmp1[3U] = (uint64_t)0xac9c52b33fa3cf1fU;
+  tmp1[0U] = 0xe0cfc810b51283cfULL;
+  tmp1[1U] = 0xa880b9fc8ec739c2ULL;
+  tmp1[2U] = 0x5ad9e3fd77ed9ba4ULL;
+  tmp1[3U] = 0xac9c52b33fa3cf1fULL;
   qadd(r2, r1, r2);
   qmul(tmp2, r2, tmp1);
   qadd(r1, k, tmp2);
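
The constants in scalar_split_lambda implement the GLV decomposition for secp256k1: the scalar k is split into r1 and r2 with k = r1 + lambda * r2 (mod q), both halves roughly 128 bits wide. lambda is a cube root of unity modulo the group order; point_mul_lambda below applies the matching curve endomorphism (x, y) -> (beta * x, y), beta being the corresponding cube root of unity in the base field.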
@@ -1068,17 +1052,17 @@ static inline void scalar_split_lambda(uint64_t *r1, uint64_t *r2, uint64_t *k)
 static inline void point_mul_lambda(uint64_t *res, uint64_t *p)
 {
   uint64_t *rx = res;
-  uint64_t *ry = res + (uint32_t)5U;
-  uint64_t *rz = res + (uint32_t)10U;
+  uint64_t *ry = res + 5U;
+  uint64_t *rz = res + 10U;
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
   uint64_t beta[5U] = { 0U };
-  beta[0U] = (uint64_t)0x96c28719501eeU;
-  beta[1U] = (uint64_t)0x7512f58995c13U;
-  beta[2U] = (uint64_t)0xc3434e99cf049U;
-  beta[3U] = (uint64_t)0x7106e64479eaU;
-  beta[4U] = (uint64_t)0x7ae96a2b657cU;
+  beta[0U] = 0x96c28719501eeULL;
+  beta[1U] = 0x7512f58995c13ULL;
+  beta[2U] = 0xc3434e99cf049ULL;
+  beta[3U] = 0x7106e64479eaULL;
+  beta[4U] = 0x7ae96a2b657cULL;
   Hacl_K256_Field_fmul(rx, beta, px);
   ry[0U] = py[0U];
   ry[1U] = py[1U];
@@ -1096,11 +1080,11 @@ static inline void point_mul_lambda_inplace(uint64_t *res)
 {
   uint64_t *rx = res;
   uint64_t beta[5U] = { 0U };
-  beta[0U] = (uint64_t)0x96c28719501eeU;
-  beta[1U] = (uint64_t)0x7512f58995c13U;
-  beta[2U] = (uint64_t)0xc3434e99cf049U;
-  beta[3U] = (uint64_t)0x7106e64479eaU;
-  beta[4U] = (uint64_t)0x7ae96a2b657cU;
+  beta[0U] = 0x96c28719501eeULL;
+  beta[1U] = 0x7512f58995c13ULL;
+  beta[2U] = 0xc3434e99cf049ULL;
+  beta[3U] = 0x7106e64479eaULL;
+  beta[4U] = 0x7ae96a2b657cULL;
   Hacl_K256_Field_fmul(rx, beta, rx);
 }
 
@@ -1123,7 +1107,7 @@ ecmult_endo_split(
 {
   scalar_split_lambda(r1, r2, scalar);
   point_mul_lambda(q2, q);
-  memcpy(q1, q, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(q1, q, 15U * sizeof (uint64_t));
   bool b0 = is_qelem_le_q_halved_vartime(r1);
   qnegate_conditional_vartime(r1, !b0);
   point_negate_conditional_vartime(q1, !b0);
@@ -1140,45 +1124,37 @@ void Hacl_Impl_K256_PointMul_point_mul(uint64_t *out, uint64_t *scalar, uint64_t
   uint64_t table[240U] = { 0U };
   uint64_t tmp[15U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)15U;
+  uint64_t *t1 = table + 15U;
   Hacl_Impl_K256_Point_make_point_at_inf(t0);
-  memcpy(t1, q, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(t1, q, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 15U;
     Hacl_Impl_K256_PointDouble_point_double(tmp, t11);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U;
+    memcpy(table + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 15U;
     Hacl_Impl_K256_PointAdd_point_add(tmp, q, t2);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t)););
+    memcpy(table + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t)););
   Hacl_Impl_K256_Point_make_point_at_inf(out);
   uint64_t tmp0[15U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)15U * sizeof (uint64_t));
+    KRML_MAYBE_FOR4(i, 0U, 4U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 256U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 15U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)15U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 15U;
       KRML_MAYBE_FOR15(i,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
+        0U,
+        15U,
+        1U,
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
@@ -1188,17 +1164,17 @@ void Hacl_Impl_K256_PointMul_point_mul(uint64_t *out, uint64_t *scalar, uint64_t
 
 static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)
 {
-  memcpy(tmp, (uint64_t *)table, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(tmp, (uint64_t *)table, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i0,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));
-    const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    15U,
+    1U,
+    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + 1U));
+    const uint64_t *res_j = table + (i0 + 1U) * 15U;
     KRML_MAYBE_FOR15(i,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
+      0U,
+      15U,
+      1U,
       uint64_t *os = tmp;
       uint64_t x = (c & res_j[i]) | (~c & tmp[i]);
       os[i] = x;););
@@ -1208,79 +1184,72 @@ static inline void point_mul_g(uint64_t *out, uint64_t *scalar)
 {
   uint64_t q1[15U] = { 0U };
   uint64_t *gx = q1;
-  uint64_t *gy = q1 + (uint32_t)5U;
-  uint64_t *gz = q1 + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = q1 + 5U;
+  uint64_t *gz = q1 + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
   uint64_t
   q2[15U] =
     {
-      (uint64_t)4496295042185355U, (uint64_t)3125448202219451U, (uint64_t)1239608518490046U,
-      (uint64_t)2687445637493112U, (uint64_t)77979604880139U, (uint64_t)3360310474215011U,
-      (uint64_t)1216410458165163U, (uint64_t)177901593587973U, (uint64_t)3209978938104985U,
-      (uint64_t)118285133003718U, (uint64_t)434519962075150U, (uint64_t)1114612377498854U,
-      (uint64_t)3488596944003813U, (uint64_t)450716531072892U, (uint64_t)66044973203836U
+      4496295042185355ULL, 3125448202219451ULL, 1239608518490046ULL, 2687445637493112ULL,
+      77979604880139ULL, 3360310474215011ULL, 1216410458165163ULL, 177901593587973ULL,
+      3209978938104985ULL, 118285133003718ULL, 434519962075150ULL, 1114612377498854ULL,
+      3488596944003813ULL, 450716531072892ULL, 66044973203836ULL
     };
-  KRML_HOST_IGNORE(q2);
+  KRML_MAYBE_UNUSED_VAR(q2);
   uint64_t
   q3[15U] =
     {
-      (uint64_t)1277614565900951U, (uint64_t)378671684419493U, (uint64_t)3176260448102880U,
-      (uint64_t)1575691435565077U, (uint64_t)167304528382180U, (uint64_t)2600787765776588U,
-      (uint64_t)7497946149293U, (uint64_t)2184272641272202U, (uint64_t)2200235265236628U,
-      (uint64_t)265969268774814U, (uint64_t)1913228635640715U, (uint64_t)2831959046949342U,
-      (uint64_t)888030405442963U, (uint64_t)1817092932985033U, (uint64_t)101515844997121U
+      1277614565900951ULL, 378671684419493ULL, 3176260448102880ULL, 1575691435565077ULL,
+      167304528382180ULL, 2600787765776588ULL, 7497946149293ULL, 2184272641272202ULL,
+      2200235265236628ULL, 265969268774814ULL, 1913228635640715ULL, 2831959046949342ULL,
+      888030405442963ULL, 1817092932985033ULL, 101515844997121ULL
     };
-  KRML_HOST_IGNORE(q3);
+  KRML_MAYBE_UNUSED_VAR(q3);
   uint64_t
   q4[15U] =
     {
-      (uint64_t)34056422761564U, (uint64_t)3315864838337811U, (uint64_t)3797032336888745U,
-      (uint64_t)2580641850480806U, (uint64_t)208048944042500U, (uint64_t)1233795288689421U,
-      (uint64_t)1048795233382631U, (uint64_t)646545158071530U, (uint64_t)1816025742137285U,
-      (uint64_t)12245672982162U, (uint64_t)2119364213800870U, (uint64_t)2034960311715107U,
-      (uint64_t)3172697815804487U, (uint64_t)4185144850224160U, (uint64_t)2792055915674U
+      34056422761564ULL, 3315864838337811ULL, 3797032336888745ULL, 2580641850480806ULL,
+      208048944042500ULL, 1233795288689421ULL, 1048795233382631ULL, 646545158071530ULL,
+      1816025742137285ULL, 12245672982162ULL, 2119364213800870ULL, 2034960311715107ULL,
+      3172697815804487ULL, 4185144850224160ULL, 2792055915674ULL
     };
-  KRML_HOST_IGNORE(q4);
+  KRML_MAYBE_UNUSED_VAR(q4);
   uint64_t *r1 = scalar;
-  uint64_t *r2 = scalar + (uint32_t)1U;
-  uint64_t *r3 = scalar + (uint32_t)2U;
-  uint64_t *r4 = scalar + (uint32_t)3U;
+  uint64_t *r2 = scalar + 1U;
+  uint64_t *r3 = scalar + 2U;
+  uint64_t *r4 = scalar + 3U;
   Hacl_Impl_K256_Point_make_point_at_inf(out);
   uint64_t tmp[15U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);
+    0U,
+    16U,
+    1U,
+    KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 64U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp);
-    uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);
+    uint32_t k0 = 64U - 4U * i - 4U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp);
-    uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);
+    uint32_t k1 = 64U - 4U * i - 4U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp);
-    uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);
+    uint32_t k2 = 64U - 4U * i - 4U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp););
 }
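
point_mul_g treats the 256-bit scalar as four 64-bit limbs r1..r4 and multiplies them in one interleaved loop with a 4-bit fixed window, combining lookups from the precomputed tables for g, 2^64*g, 2^128*g and 2^192*g; precomp_get_consttime selects each table entry with a constant-time mask so the secret scalar never drives a memory address.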
@@ -1290,75 +1259,65 @@ point_mul_g_double_vartime(uint64_t *out, uint64_t *scalar1, uint64_t *scalar2,
 {
   uint64_t q1[15U] = { 0U };
   uint64_t *gx = q1;
-  uint64_t *gy = q1 + (uint32_t)5U;
-  uint64_t *gz = q1 + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = q1 + 5U;
+  uint64_t *gz = q1 + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
   uint64_t table2[480U] = { 0U };
   uint64_t tmp[15U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)15U;
+  uint64_t *t1 = table2 + 15U;
   Hacl_Impl_K256_Point_make_point_at_inf(t0);
-  memcpy(t1, q2, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(t1, q2, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 15U;
     Hacl_Impl_K256_PointDouble_point_double(tmp, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U;
+    memcpy(table2 + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 15U;
     Hacl_Impl_K256_PointAdd_point_add(tmp, q2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t)););
   uint64_t tmp0[15U] = { 0U };
-  uint32_t i0 = (uint32_t)255U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, i0, (uint32_t)5U);
+  uint32_t i0 = 255U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
-  const
-  uint64_t
-  *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)15U;
-  memcpy(out, (uint64_t *)a_bits_l, (uint32_t)15U * sizeof (uint64_t));
-  uint32_t i1 = (uint32_t)255U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, i1, (uint32_t)5U);
+  const uint64_t *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 15U;
+  memcpy(out, (uint64_t *)a_bits_l, 15U * sizeof (uint64_t));
+  uint32_t i1 = 255U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
-  const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)15U;
-  memcpy(tmp0, (uint64_t *)a_bits_l0, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l0 = table2 + bits_l320 * 15U;
+  memcpy(tmp0, (uint64_t *)a_bits_l0, 15U * sizeof (uint64_t));
   Hacl_Impl_K256_PointAdd_point_add(out, out, tmp0);
   uint64_t tmp1[15U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++)
+  for (uint32_t i = 0U; i < 51U; i++)
   {
-    KRML_MAYBE_FOR5(i2,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 255U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, k, 5U);
     uint32_t bits_l321 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)15U;
-    memcpy(tmp1, (uint64_t *)a_bits_l1, (uint32_t)15U * sizeof (uint64_t));
+    const uint64_t *a_bits_l1 = table2 + bits_l321 * 15U;
+    memcpy(tmp1, (uint64_t *)a_bits_l1, 15U * sizeof (uint64_t));
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1);
-    uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, k0, (uint32_t)5U);
+    uint32_t k0 = 255U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, k0, 5U);
     uint32_t bits_l322 = (uint32_t)bits_l0;
     const
     uint64_t
-    *a_bits_l2 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)15U;
-    memcpy(tmp1, (uint64_t *)a_bits_l2, (uint32_t)15U * sizeof (uint64_t));
+    *a_bits_l2 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 15U;
+    memcpy(tmp1, (uint64_t *)a_bits_l2, 15U * sizeof (uint64_t));
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1);
   }
 }
@@ -1380,99 +1339,89 @@ point_mul_g_double_split_lambda_table(
   uint64_t table2[480U] = { 0U };
   uint64_t tmp[15U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)15U;
+  uint64_t *t1 = table2 + 15U;
   Hacl_Impl_K256_Point_make_point_at_inf(t0);
-  memcpy(t1, p2, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(t1, p2, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 15U;
     Hacl_Impl_K256_PointDouble_point_double(tmp, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U;
+    memcpy(table2 + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 15U;
     Hacl_Impl_K256_PointAdd_point_add(tmp, p2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t)););
   uint64_t tmp0[15U] = { 0U };
   uint64_t tmp1[15U] = { 0U };
-  uint32_t i0 = (uint32_t)125U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r1, i0, (uint32_t)5U);
+  uint32_t i0 = 125U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
-  const
-  uint64_t
-  *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)15U;
-  memcpy(out, (uint64_t *)a_bits_l, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 15U;
+  memcpy(out, (uint64_t *)a_bits_l, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(out, is_negate1);
-  uint32_t i1 = (uint32_t)125U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r2, i1, (uint32_t)5U);
+  uint32_t i1 = 125U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
   const
   uint64_t
-  *a_bits_l0 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l320 * (uint32_t)15U;
-  memcpy(tmp1, (uint64_t *)a_bits_l0, (uint32_t)15U * sizeof (uint64_t));
+  *a_bits_l0 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l320 * 15U;
+  memcpy(tmp1, (uint64_t *)a_bits_l0, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(tmp1, is_negate2);
   point_mul_lambda_inplace(tmp1);
   Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1);
   uint64_t tmp10[15U] = { 0U };
-  uint32_t i2 = (uint32_t)125U;
-  uint64_t bits_c1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r3, i2, (uint32_t)5U);
+  uint32_t i2 = 125U;
+  uint64_t bits_c1 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r3, i2, 5U);
   uint32_t bits_l321 = (uint32_t)bits_c1;
-  const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)15U;
-  memcpy(tmp0, (uint64_t *)a_bits_l1, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l1 = table2 + bits_l321 * 15U;
+  memcpy(tmp0, (uint64_t *)a_bits_l1, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(tmp0, is_negate3);
-  uint32_t i3 = (uint32_t)125U;
-  uint64_t bits_c2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r4, i3, (uint32_t)5U);
+  uint32_t i3 = 125U;
+  uint64_t bits_c2 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r4, i3, 5U);
   uint32_t bits_l322 = (uint32_t)bits_c2;
-  const uint64_t *a_bits_l2 = table2 + bits_l322 * (uint32_t)15U;
-  memcpy(tmp10, (uint64_t *)a_bits_l2, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l2 = table2 + bits_l322 * 15U;
+  memcpy(tmp10, (uint64_t *)a_bits_l2, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(tmp10, is_negate4);
   point_mul_lambda_inplace(tmp10);
   Hacl_Impl_K256_PointAdd_point_add(tmp0, tmp0, tmp10);
   Hacl_Impl_K256_PointAdd_point_add(out, out, tmp0);
   uint64_t tmp2[15U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+  for (uint32_t i = 0U; i < 25U; i++)
   {
-    KRML_MAYBE_FOR5(i4,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r4, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i4, 0U, 5U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 125U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r4, k, 5U);
     uint32_t bits_l323 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l3 = table2 + bits_l323 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l3, (uint32_t)15U * sizeof (uint64_t));
+    const uint64_t *a_bits_l3 = table2 + bits_l323 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l3, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate4);
     point_mul_lambda_inplace(tmp2);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
-    uint32_t k0 = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r3, k0, (uint32_t)5U);
+    uint32_t k0 = 125U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r3, k0, 5U);
     uint32_t bits_l324 = (uint32_t)bits_l0;
-    const uint64_t *a_bits_l4 = table2 + bits_l324 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l4, (uint32_t)15U * sizeof (uint64_t));
+    const uint64_t *a_bits_l4 = table2 + bits_l324 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l4, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate3);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
-    uint32_t k1 = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r2, k1, (uint32_t)5U);
+    uint32_t k1 = 125U - 5U * i - 5U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r2, k1, 5U);
     uint32_t bits_l325 = (uint32_t)bits_l1;
     const
     uint64_t
-    *a_bits_l5 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l325 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l5, (uint32_t)15U * sizeof (uint64_t));
+    *a_bits_l5 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l325 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l5, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate2);
     point_mul_lambda_inplace(tmp2);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
-    uint32_t k2 = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r1, k2, (uint32_t)5U);
+    uint32_t k2 = 125U - 5U * i - 5U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r1, k2, 5U);
     uint32_t bits_l326 = (uint32_t)bits_l2;
     const
     uint64_t
-    *a_bits_l6 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l326 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l6, (uint32_t)15U * sizeof (uint64_t));
+    *a_bits_l6 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l326 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l6, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate1);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
   }
@@ -1483,16 +1432,16 @@ check_ecmult_endo_split(uint64_t *r1, uint64_t *r2, uint64_t *r3, uint64_t *r4)
 {
   uint64_t f20 = r1[2U];
   uint64_t f30 = r1[3U];
-  bool b1 = f20 == (uint64_t)0U && f30 == (uint64_t)0U;
+  bool b1 = f20 == 0ULL && f30 == 0ULL;
   uint64_t f21 = r2[2U];
   uint64_t f31 = r2[3U];
-  bool b2 = f21 == (uint64_t)0U && f31 == (uint64_t)0U;
+  bool b2 = f21 == 0ULL && f31 == 0ULL;
   uint64_t f22 = r3[2U];
   uint64_t f32 = r3[3U];
-  bool b3 = f22 == (uint64_t)0U && f32 == (uint64_t)0U;
+  bool b3 = f22 == 0ULL && f32 == 0ULL;
   uint64_t f2 = r4[2U];
   uint64_t f3 = r4[3U];
-  bool b4 = f2 == (uint64_t)0U && f3 == (uint64_t)0U;
+  bool b4 = f2 == 0ULL && f3 == 0ULL;
   return b1 && b2 && b3 && b4;
 }
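
check_ecmult_endo_split only verifies that the two upper limbs of each split scalar are zero, i.e. that r1..r4 all fit in 128 bits, which is the size the lambda decomposition is expected to produce.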
 
@@ -1515,30 +1464,30 @@ point_mul_g_double_split_lambda_vartime(
 {
   uint64_t g[15U] = { 0U };
   uint64_t *gx = g;
-  uint64_t *gy = g + (uint32_t)5U;
-  uint64_t *gz = g + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = g + 5U;
+  uint64_t *gz = g + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
   uint64_t r1234[16U] = { 0U };
   uint64_t q1234[60U] = { 0U };
   uint64_t *r1 = r1234;
-  uint64_t *r2 = r1234 + (uint32_t)4U;
-  uint64_t *r3 = r1234 + (uint32_t)8U;
-  uint64_t *r4 = r1234 + (uint32_t)12U;
+  uint64_t *r2 = r1234 + 4U;
+  uint64_t *r3 = r1234 + 8U;
+  uint64_t *r4 = r1234 + 12U;
   uint64_t *q1 = q1234;
-  uint64_t *q2 = q1234 + (uint32_t)15U;
-  uint64_t *q3 = q1234 + (uint32_t)30U;
-  uint64_t *q4 = q1234 + (uint32_t)45U;
+  uint64_t *q2 = q1234 + 15U;
+  uint64_t *q3 = q1234 + 30U;
+  uint64_t *q4 = q1234 + 45U;
   __bool_bool scrut0 = ecmult_endo_split(r1, r2, q1, q2, scalar1, g);
   bool is_high10 = scrut0.fst;
   bool is_high20 = scrut0.snd;
@@ -1615,30 +1564,30 @@ Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(
   uint8_t *nonce
 )
 {
-  uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
-  KRML_HOST_IGNORE(oneq);
+  uint64_t oneq[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
+  KRML_MAYBE_UNUSED_VAR(oneq);
   uint64_t rsdk_q[16U] = { 0U };
   uint64_t *r_q = rsdk_q;
-  uint64_t *s_q = rsdk_q + (uint32_t)4U;
-  uint64_t *d_a = rsdk_q + (uint32_t)8U;
-  uint64_t *k_q = rsdk_q + (uint32_t)12U;
+  uint64_t *s_q = rsdk_q + 4U;
+  uint64_t *d_a = rsdk_q + 8U;
+  uint64_t *k_q = rsdk_q + 12U;
   uint64_t is_b_valid0 = load_qelem_check(d_a, private_key);
-  uint64_t oneq10[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq10[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = d_a;
     uint64_t uu____0 = oneq10[i];
     uint64_t x = uu____0 ^ (is_b_valid0 & (d_a[i] ^ uu____0));
     os[i] = x;);
   uint64_t is_sk_valid = is_b_valid0;
   uint64_t is_b_valid = load_qelem_check(k_q, nonce);
-  uint64_t oneq1[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq1[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = k_q;
     uint64_t uu____1 = oneq1[i];
     uint64_t x = uu____1 ^ (is_b_valid & (k_q[i] ^ uu____1));
@@ -1660,11 +1609,11 @@ Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(
   qadd(s_q, z, s_q);
   qmul(s_q, kinv, s_q);
   store_qelem(signature, r_q);
-  store_qelem(signature + (uint32_t)32U, s_q);
+  store_qelem(signature + 32U, s_q);
   uint64_t is_r_zero = is_qelem_zero(r_q);
   uint64_t is_s_zero = is_qelem_zero(s_q);
   uint64_t m = are_sk_nonce_valid & (~is_r_zero & ~is_s_zero);
-  bool res = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool res = m == 0xFFFFFFFFFFFFFFFFULL;
   return res;
 }
 
@@ -1691,7 +1640,7 @@ Hacl_K256_ECDSA_ecdsa_sign_sha256(
 )
 {
   uint8_t msgHash[32U] = { 0U };
-  Hacl_Streaming_SHA2_hash_256(msg, msg_len, msgHash);
+  Hacl_Hash_SHA2_hash_256(msgHash, msg, msg_len);
   bool b = Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(signature, msgHash, private_key, nonce);
   return b;
 }
@@ -1713,14 +1662,14 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
 {
   uint64_t tmp[35U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *r_q = tmp + (uint32_t)15U;
-  uint64_t *s_q = tmp + (uint32_t)19U;
-  uint64_t *u1 = tmp + (uint32_t)23U;
-  uint64_t *u2 = tmp + (uint32_t)27U;
-  uint64_t *m_q = tmp + (uint32_t)31U;
+  uint64_t *r_q = tmp + 15U;
+  uint64_t *s_q = tmp + 19U;
+  uint64_t *u1 = tmp + 23U;
+  uint64_t *u2 = tmp + 27U;
+  uint64_t *m_q = tmp + 31U;
   bool is_pk_valid = load_point_vartime(pk, public_key);
   bool is_r_valid = load_qelem_vartime(r_q, signature);
-  bool is_s_valid = load_qelem_vartime(s_q, signature + (uint32_t)32U);
+  bool is_s_valid = load_qelem_vartime(s_q, signature + 32U);
   bool is_rs_valid = is_r_valid && is_s_valid;
   load_qelem_modq(m_q, m);
   if (!(is_pk_valid && is_rs_valid))
@@ -1734,7 +1683,7 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
   uint64_t res[15U] = { 0U };
   point_mul_g_double_split_lambda_vartime(res, u1, u2, pk);
   uint64_t tmp1[5U] = { 0U };
-  uint64_t *pz = res + (uint32_t)10U;
+  uint64_t *pz = res + 10U;
   Hacl_K256_Field_fnormalize(tmp1, pz);
   bool b = Hacl_K256_Field_is_felem_zero_vartime(tmp1);
   if (b)
@@ -1742,7 +1691,7 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
     return false;
   }
   uint64_t *x = res;
-  uint64_t *z = res + (uint32_t)10U;
+  uint64_t *z = res + 10U;
   uint8_t r_bytes[32U] = { 0U };
   uint64_t r_fe[5U] = { 0U };
   uint64_t tmp_q[5U] = { 0U };
@@ -1756,11 +1705,11 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
     bool is_r_lt_p_m_q = Hacl_K256_Field_is_felem_lt_prime_minus_order_vartime(r_fe);
     if (is_r_lt_p_m_q)
     {
-      tmp_q[0U] = (uint64_t)0x25e8cd0364141U;
-      tmp_q[1U] = (uint64_t)0xe6af48a03bbfdU;
-      tmp_q[2U] = (uint64_t)0xffffffebaaedcU;
-      tmp_q[3U] = (uint64_t)0xfffffffffffffU;
-      tmp_q[4U] = (uint64_t)0xffffffffffffU;
+      tmp_q[0U] = 0x25e8cd0364141ULL;
+      tmp_q[1U] = 0xe6af48a03bbfdULL;
+      tmp_q[2U] = 0xffffffebaaedcULL;
+      tmp_q[3U] = 0xfffffffffffffULL;
+      tmp_q[4U] = 0xffffffffffffULL;
       Hacl_K256_Field_fadd(tmp_q, r_fe, tmp_q);
       return fmul_eq_vartime(tmp_q, z, tmp_x);
     }
@@ -1790,7 +1739,7 @@ Hacl_K256_ECDSA_ecdsa_verify_sha256(
 )
 {
   uint8_t mHash[32U] = { 0U };
-  Hacl_Streaming_SHA2_hash_256(msg, msg_len, mHash);
+  Hacl_Hash_SHA2_hash_256(mHash, msg, msg_len);
   bool b = Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(mHash, public_key, signature);
   return b;
 }
@@ -1805,7 +1754,7 @@ Compute canonical lowest S value for `signature` (R || S).
 bool Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(uint8_t *signature)
 {
   uint64_t s_q[4U] = { 0U };
-  uint8_t *s = signature + (uint32_t)32U;
+  uint8_t *s = signature + 32U;
   bool is_sk_valid = load_qelem_vartime(s_q, s);
   if (!is_sk_valid)
   {
@@ -1813,7 +1762,7 @@ bool Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(uint8_t *signature)
   }
   bool is_sk_lt_q_halved = is_qelem_le_q_halved_vartime(s_q);
   qnegate_conditional_vartime(s_q, !is_sk_lt_q_halved);
-  store_qelem(signature + (uint32_t)32U, s_q);
+  store_qelem(signature + 32U, s_q);
   return true;
 }
 
@@ -1827,7 +1776,7 @@ Check whether `signature` (R || S) is in canonical form.
 bool Hacl_K256_ECDSA_secp256k1_ecdsa_is_signature_normalized(uint8_t *signature)
 {
   uint64_t s_q[4U] = { 0U };
-  uint8_t *s = signature + (uint32_t)32U;
+  uint8_t *s = signature + 32U;
   bool is_s_valid = load_qelem_vartime(s_q, s);
   bool is_s_lt_q_halved = is_qelem_le_q_halved_vartime(s_q);
   return is_s_valid && is_s_lt_q_halved;
@@ -1886,7 +1835,7 @@ Hacl_K256_ECDSA_secp256k1_ecdsa_sign_sha256(
 )
 {
   uint8_t msgHash[32U] = { 0U };
-  Hacl_Streaming_SHA2_hash_256(msg, msg_len, msgHash);
+  Hacl_Hash_SHA2_hash_256(msgHash, msg, msg_len);
   bool
   b = Hacl_K256_ECDSA_secp256k1_ecdsa_sign_hashed_msg(signature, msgHash, private_key, nonce);
   return b;
@@ -1940,7 +1889,7 @@ Hacl_K256_ECDSA_secp256k1_ecdsa_verify_sha256(
 )
 {
   uint8_t mHash[32U] = { 0U };
-  Hacl_Streaming_SHA2_hash_256(msg, msg_len, mHash);
+  Hacl_Hash_SHA2_hash_256(mHash, msg, msg_len);
   bool b = Hacl_K256_ECDSA_secp256k1_ecdsa_verify_hashed_msg(mHash, public_key, signature);
   return b;
 }
@@ -1971,11 +1920,11 @@ Convert a public key from uncompressed to its raw form.
 bool Hacl_K256_ECDSA_public_key_uncompressed_to_raw(uint8_t *pk_raw, uint8_t *pk)
 {
   uint8_t pk0 = pk[0U];
-  if (pk0 != (uint8_t)0x04U)
+  if (pk0 != 0x04U)
   {
     return false;
   }
-  memcpy(pk_raw, pk + (uint32_t)1U, (uint32_t)64U * sizeof (uint8_t));
+  memcpy(pk_raw, pk + 1U, 64U * sizeof (uint8_t));
   return true;
 }
 
@@ -1989,8 +1938,8 @@ Convert a public key from raw to its uncompressed form.
 */
 void Hacl_K256_ECDSA_public_key_uncompressed_from_raw(uint8_t *pk, uint8_t *pk_raw)
 {
-  pk[0U] = (uint8_t)0x04U;
-  memcpy(pk + (uint32_t)1U, pk_raw, (uint32_t)64U * sizeof (uint8_t));
+  pk[0U] = 0x04U;
+  memcpy(pk + 1U, pk_raw, 64U * sizeof (uint8_t));
 }
 
 /**
@@ -2007,12 +1956,12 @@ bool Hacl_K256_ECDSA_public_key_compressed_to_raw(uint8_t *pk_raw, uint8_t *pk)
 {
   uint64_t xa[5U] = { 0U };
   uint64_t ya[5U] = { 0U };
-  uint8_t *pk_xb = pk + (uint32_t)1U;
+  uint8_t *pk_xb = pk + 1U;
   bool b = aff_point_decompress_vartime(xa, ya, pk);
   if (b)
   {
-    memcpy(pk_raw, pk_xb, (uint32_t)32U * sizeof (uint8_t));
-    Hacl_K256_Field_store_felem(pk_raw + (uint32_t)32U, ya);
+    memcpy(pk_raw, pk_xb, 32U * sizeof (uint8_t));
+    Hacl_K256_Field_store_felem(pk_raw + 32U, ya);
   }
   return b;
 }
@@ -2028,20 +1977,20 @@ Convert a public key from raw to its compressed form.
 void Hacl_K256_ECDSA_public_key_compressed_from_raw(uint8_t *pk, uint8_t *pk_raw)
 {
   uint8_t *pk_x = pk_raw;
-  uint8_t *pk_y = pk_raw + (uint32_t)32U;
+  uint8_t *pk_y = pk_raw + 32U;
   uint8_t x0 = pk_y[31U];
-  bool is_pk_y_odd = (x0 & (uint8_t)1U) == (uint8_t)1U;
+  bool is_pk_y_odd = ((uint32_t)x0 & 1U) == 1U;
   uint8_t ite;
   if (is_pk_y_odd)
   {
-    ite = (uint8_t)0x03U;
+    ite = 0x03U;
   }
   else
   {
-    ite = (uint8_t)0x02U;
+    ite = 0x02U;
   }
   pk[0U] = ite;
-  memcpy(pk + (uint32_t)1U, pk_x, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(pk + 1U, pk_x, 32U * sizeof (uint8_t));
 }
 
 
@@ -2084,7 +2033,7 @@ bool Hacl_K256_ECDSA_is_private_key_valid(uint8_t *private_key)
 {
   uint64_t s_q[4U] = { 0U };
   uint64_t res = load_qelem_check(s_q, private_key);
-  return res == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return res == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 
@@ -2107,13 +2056,13 @@ bool Hacl_K256_ECDSA_secret_to_public(uint8_t *public_key, uint8_t *private_key)
 {
   uint64_t tmp[19U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *sk = tmp + (uint32_t)15U;
+  uint64_t *sk = tmp + 15U;
   uint64_t is_b_valid = load_qelem_check(sk, private_key);
-  uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -2121,7 +2070,7 @@ bool Hacl_K256_ECDSA_secret_to_public(uint8_t *public_key, uint8_t *private_key)
   uint64_t is_sk_valid = is_b_valid;
   point_mul_g(pk, sk);
   Hacl_Impl_K256_Point_point_store(public_key, pk);
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -2140,15 +2089,15 @@ bool Hacl_K256_ECDSA_ecdh(uint8_t *shared_secret, uint8_t *their_pubkey, uint8_t
 {
   uint64_t tmp[34U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *ss = tmp + (uint32_t)15U;
-  uint64_t *sk = tmp + (uint32_t)30U;
+  uint64_t *ss = tmp + 15U;
+  uint64_t *sk = tmp + 30U;
   bool is_pk_valid = load_point_vartime(pk, their_pubkey);
   uint64_t is_b_valid = load_qelem_check(sk, private_key);
-  uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -2159,6 +2108,6 @@ bool Hacl_K256_ECDSA_ecdh(uint8_t *shared_secret, uint8_t *their_pubkey, uint8_t
     Hacl_Impl_K256_PointMul_point_mul(ss, sk, pk);
     Hacl_Impl_K256_Point_point_store(shared_secret, ss);
   }
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU && is_pk_valid;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL && is_pk_valid;
 }
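
For orientation, a minimal caller-side sketch of the ECDSA entry points touched above. It is illustrative only: it assumes the library's Hacl_K256_ECDSA.h header, the buffer names are hypothetical, and a real caller must supply a fresh secret nonce (for instance derived per RFC 6979) together with the signer's 64-byte raw public key.

#include <stdbool.h>
#include <stdint.h>
#include "Hacl_K256_ECDSA.h"

/* Sign a 32-byte digest, normalize S to its low form, then verify. */
bool sign_and_verify_example(uint8_t private_key[32], uint8_t public_key[64],
                             uint8_t nonce[32], uint8_t msg_hash[32])
{
  uint8_t signature[64] = { 0U };        /* R || S, 32 bytes each */
  if (!Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(signature, msg_hash, private_key, nonce))
    return false;                        /* invalid key/nonce, or R or S was zero */
  /* Optional: rewrite S to the canonical low-S form to rule out malleability. */
  (void)Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(signature);
  return Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(msg_hash, public_key, signature);
}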
 
diff --git a/src/Hacl_MAC_Poly1305.c b/src/Hacl_MAC_Poly1305.c
new file mode 100644
index 00000000..28cbca5a
--- /dev/null
+++ b/src/Hacl_MAC_Poly1305.c
@@ -0,0 +1,712 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_MAC_Poly1305.h"
+
+void Hacl_MAC_Poly1305_poly1305_init(uint64_t *ctx, uint8_t *key)
+{
+  uint64_t *acc = ctx;
+  uint64_t *pre = ctx + 5U;
+  uint8_t *kr = key;
+  acc[0U] = 0ULL;
+  acc[1U] = 0ULL;
+  acc[2U] = 0ULL;
+  acc[3U] = 0ULL;
+  acc[4U] = 0ULL;
+  uint64_t u0 = load64_le(kr);
+  uint64_t lo = u0;
+  uint64_t u = load64_le(kr + 8U);
+  uint64_t hi = u;
+  uint64_t mask0 = 0x0ffffffc0fffffffULL;
+  uint64_t mask1 = 0x0ffffffc0ffffffcULL;
+  uint64_t lo1 = lo & mask0;
+  uint64_t hi1 = hi & mask1;
+  uint64_t *r = pre;
+  uint64_t *r5 = pre + 5U;
+  uint64_t *rn = pre + 10U;
+  uint64_t *rn_5 = pre + 15U;
+  uint64_t r_vec0 = lo1;
+  uint64_t r_vec1 = hi1;
+  uint64_t f00 = r_vec0 & 0x3ffffffULL;
+  uint64_t f10 = r_vec0 >> 26U & 0x3ffffffULL;
+  uint64_t f20 = r_vec0 >> 52U | (r_vec1 & 0x3fffULL) << 12U;
+  uint64_t f30 = r_vec1 >> 14U & 0x3ffffffULL;
+  uint64_t f40 = r_vec1 >> 40U;
+  uint64_t f0 = f00;
+  uint64_t f1 = f10;
+  uint64_t f2 = f20;
+  uint64_t f3 = f30;
+  uint64_t f4 = f40;
+  r[0U] = f0;
+  r[1U] = f1;
+  r[2U] = f2;
+  r[3U] = f3;
+  r[4U] = f4;
+  uint64_t f200 = r[0U];
+  uint64_t f21 = r[1U];
+  uint64_t f22 = r[2U];
+  uint64_t f23 = r[3U];
+  uint64_t f24 = r[4U];
+  r5[0U] = f200 * 5ULL;
+  r5[1U] = f21 * 5ULL;
+  r5[2U] = f22 * 5ULL;
+  r5[3U] = f23 * 5ULL;
+  r5[4U] = f24 * 5ULL;
+  rn[0U] = r[0U];
+  rn[1U] = r[1U];
+  rn[2U] = r[2U];
+  rn[3U] = r[3U];
+  rn[4U] = r[4U];
+  rn_5[0U] = r5[0U];
+  rn_5[1U] = r5[1U];
+  rn_5[2U] = r5[2U];
+  rn_5[3U] = r5[3U];
+  rn_5[4U] = r5[4U];
+}
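+
+/*
+ * Note: the 25-word ctx is laid out as acc (5 limbs) followed by the
+ * precomputed block pre = r, 5*r, r^n, 5*r^n (four groups of 5 limbs); in this
+ * scalar implementation r^n is simply a copy of r.  Field elements use radix
+ * 2^26: a 128-bit value x is stored as five limbs with
+ *   x = f0 + f1*2^26 + f2*2^52 + f3*2^78 + f4*2^104,
+ * which is the split performed on the clamped key above and undone again in
+ * poly1305_finish when the accumulator is serialized.
+ */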
+
+static void poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
+{
+  uint64_t *pre = ctx + 5U;
+  uint64_t *acc = ctx;
+  uint32_t nb = len / 16U;
+  uint32_t rem = len % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
+  {
+    uint8_t *block = text + i * 16U;
+    uint64_t e[5U] = { 0U };
+    uint64_t u0 = load64_le(block);
+    uint64_t lo = u0;
+    uint64_t u = load64_le(block + 8U);
+    uint64_t hi = u;
+    uint64_t f0 = lo;
+    uint64_t f1 = hi;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
+    uint64_t f01 = f010;
+    uint64_t f111 = f110;
+    uint64_t f2 = f20;
+    uint64_t f3 = f30;
+    uint64_t f41 = f40;
+    e[0U] = f01;
+    e[1U] = f111;
+    e[2U] = f2;
+    e[3U] = f3;
+    e[4U] = f41;
+    uint64_t b = 0x1000000ULL;
+    uint64_t mask = b;
+    uint64_t f4 = e[4U];
+    e[4U] = f4 | mask;
+    uint64_t *r = pre;
+    uint64_t *r5 = pre + 5U;
+    uint64_t r0 = r[0U];
+    uint64_t r1 = r[1U];
+    uint64_t r2 = r[2U];
+    uint64_t r3 = r[3U];
+    uint64_t r4 = r[4U];
+    uint64_t r51 = r5[1U];
+    uint64_t r52 = r5[2U];
+    uint64_t r53 = r5[3U];
+    uint64_t r54 = r5[4U];
+    uint64_t f10 = e[0U];
+    uint64_t f11 = e[1U];
+    uint64_t f12 = e[2U];
+    uint64_t f13 = e[3U];
+    uint64_t f14 = e[4U];
+    uint64_t a0 = acc[0U];
+    uint64_t a1 = acc[1U];
+    uint64_t a2 = acc[2U];
+    uint64_t a3 = acc[3U];
+    uint64_t a4 = acc[4U];
+    uint64_t a01 = a0 + f10;
+    uint64_t a11 = a1 + f11;
+    uint64_t a21 = a2 + f12;
+    uint64_t a31 = a3 + f13;
+    uint64_t a41 = a4 + f14;
+    uint64_t a02 = r0 * a01;
+    uint64_t a12 = r1 * a01;
+    uint64_t a22 = r2 * a01;
+    uint64_t a32 = r3 * a01;
+    uint64_t a42 = r4 * a01;
+    uint64_t a03 = a02 + r54 * a11;
+    uint64_t a13 = a12 + r0 * a11;
+    uint64_t a23 = a22 + r1 * a11;
+    uint64_t a33 = a32 + r2 * a11;
+    uint64_t a43 = a42 + r3 * a11;
+    uint64_t a04 = a03 + r53 * a21;
+    uint64_t a14 = a13 + r54 * a21;
+    uint64_t a24 = a23 + r0 * a21;
+    uint64_t a34 = a33 + r1 * a21;
+    uint64_t a44 = a43 + r2 * a21;
+    uint64_t a05 = a04 + r52 * a31;
+    uint64_t a15 = a14 + r53 * a31;
+    uint64_t a25 = a24 + r54 * a31;
+    uint64_t a35 = a34 + r0 * a31;
+    uint64_t a45 = a44 + r1 * a31;
+    uint64_t a06 = a05 + r51 * a41;
+    uint64_t a16 = a15 + r52 * a41;
+    uint64_t a26 = a25 + r53 * a41;
+    uint64_t a36 = a35 + r54 * a41;
+    uint64_t a46 = a45 + r0 * a41;
+    uint64_t t0 = a06;
+    uint64_t t1 = a16;
+    uint64_t t2 = a26;
+    uint64_t t3 = a36;
+    uint64_t t4 = a46;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
+    uint64_t x0 = t0 & mask26;
+    uint64_t x3 = t3 & mask26;
+    uint64_t x1 = t1 + z0;
+    uint64_t x4 = t4 + z1;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
+    uint64_t z12 = z11 + t;
+    uint64_t x11 = x1 & mask26;
+    uint64_t x41 = x4 & mask26;
+    uint64_t x2 = t2 + z01;
+    uint64_t x01 = x0 + z12;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
+    uint64_t x21 = x2 & mask26;
+    uint64_t x02 = x01 & mask26;
+    uint64_t x31 = x3 + z02;
+    uint64_t x12 = x11 + z13;
+    uint64_t z03 = x31 >> 26U;
+    uint64_t x32 = x31 & mask26;
+    uint64_t x42 = x41 + z03;
+    uint64_t o0 = x02;
+    uint64_t o1 = x12;
+    uint64_t o2 = x21;
+    uint64_t o3 = x32;
+    uint64_t o4 = x42;
+    acc[0U] = o0;
+    acc[1U] = o1;
+    acc[2U] = o2;
+    acc[3U] = o3;
+    acc[4U] = o4;
+  }
+  if (rem > 0U)
+  {
+    uint8_t *last = text + nb * 16U;
+    uint64_t e[5U] = { 0U };
+    uint8_t tmp[16U] = { 0U };
+    memcpy(tmp, last, rem * sizeof (uint8_t));
+    uint64_t u0 = load64_le(tmp);
+    uint64_t lo = u0;
+    uint64_t u = load64_le(tmp + 8U);
+    uint64_t hi = u;
+    uint64_t f0 = lo;
+    uint64_t f1 = hi;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
+    uint64_t f01 = f010;
+    uint64_t f111 = f110;
+    uint64_t f2 = f20;
+    uint64_t f3 = f30;
+    uint64_t f4 = f40;
+    e[0U] = f01;
+    e[1U] = f111;
+    e[2U] = f2;
+    e[3U] = f3;
+    e[4U] = f4;
+    uint64_t b = 1ULL << rem * 8U % 26U;
+    uint64_t mask = b;
+    uint64_t fi = e[rem * 8U / 26U];
+    e[rem * 8U / 26U] = fi | mask;
+    uint64_t *r = pre;
+    uint64_t *r5 = pre + 5U;
+    uint64_t r0 = r[0U];
+    uint64_t r1 = r[1U];
+    uint64_t r2 = r[2U];
+    uint64_t r3 = r[3U];
+    uint64_t r4 = r[4U];
+    uint64_t r51 = r5[1U];
+    uint64_t r52 = r5[2U];
+    uint64_t r53 = r5[3U];
+    uint64_t r54 = r5[4U];
+    uint64_t f10 = e[0U];
+    uint64_t f11 = e[1U];
+    uint64_t f12 = e[2U];
+    uint64_t f13 = e[3U];
+    uint64_t f14 = e[4U];
+    uint64_t a0 = acc[0U];
+    uint64_t a1 = acc[1U];
+    uint64_t a2 = acc[2U];
+    uint64_t a3 = acc[3U];
+    uint64_t a4 = acc[4U];
+    uint64_t a01 = a0 + f10;
+    uint64_t a11 = a1 + f11;
+    uint64_t a21 = a2 + f12;
+    uint64_t a31 = a3 + f13;
+    uint64_t a41 = a4 + f14;
+    uint64_t a02 = r0 * a01;
+    uint64_t a12 = r1 * a01;
+    uint64_t a22 = r2 * a01;
+    uint64_t a32 = r3 * a01;
+    uint64_t a42 = r4 * a01;
+    uint64_t a03 = a02 + r54 * a11;
+    uint64_t a13 = a12 + r0 * a11;
+    uint64_t a23 = a22 + r1 * a11;
+    uint64_t a33 = a32 + r2 * a11;
+    uint64_t a43 = a42 + r3 * a11;
+    uint64_t a04 = a03 + r53 * a21;
+    uint64_t a14 = a13 + r54 * a21;
+    uint64_t a24 = a23 + r0 * a21;
+    uint64_t a34 = a33 + r1 * a21;
+    uint64_t a44 = a43 + r2 * a21;
+    uint64_t a05 = a04 + r52 * a31;
+    uint64_t a15 = a14 + r53 * a31;
+    uint64_t a25 = a24 + r54 * a31;
+    uint64_t a35 = a34 + r0 * a31;
+    uint64_t a45 = a44 + r1 * a31;
+    uint64_t a06 = a05 + r51 * a41;
+    uint64_t a16 = a15 + r52 * a41;
+    uint64_t a26 = a25 + r53 * a41;
+    uint64_t a36 = a35 + r54 * a41;
+    uint64_t a46 = a45 + r0 * a41;
+    uint64_t t0 = a06;
+    uint64_t t1 = a16;
+    uint64_t t2 = a26;
+    uint64_t t3 = a36;
+    uint64_t t4 = a46;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
+    uint64_t x0 = t0 & mask26;
+    uint64_t x3 = t3 & mask26;
+    uint64_t x1 = t1 + z0;
+    uint64_t x4 = t4 + z1;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
+    uint64_t z12 = z11 + t;
+    uint64_t x11 = x1 & mask26;
+    uint64_t x41 = x4 & mask26;
+    uint64_t x2 = t2 + z01;
+    uint64_t x01 = x0 + z12;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
+    uint64_t x21 = x2 & mask26;
+    uint64_t x02 = x01 & mask26;
+    uint64_t x31 = x3 + z02;
+    uint64_t x12 = x11 + z13;
+    uint64_t z03 = x31 >> 26U;
+    uint64_t x32 = x31 & mask26;
+    uint64_t x42 = x41 + z03;
+    uint64_t o0 = x02;
+    uint64_t o1 = x12;
+    uint64_t o2 = x21;
+    uint64_t o3 = x32;
+    uint64_t o4 = x42;
+    acc[0U] = o0;
+    acc[1U] = o1;
+    acc[2U] = o2;
+    acc[3U] = o3;
+    acc[4U] = o4;
+    return;
+  }
+}
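+
+/*
+ * Each 16-byte block above is loaded little-endian into five 26-bit limbs, a
+ * 2^128 bit (or 2^(8*rem) for the final partial block) is set as the message
+ * high bit, and the accumulator is updated as acc = (acc + block) * r using
+ * schoolbook limb products followed by one lazy carry pass modulo 2^130 - 5
+ * (the 5*r limbs fold the high products back in).  The accumulator is only
+ * weakly reduced here; the full reduction happens in poly1305_finish.
+ */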
+
+void Hacl_MAC_Poly1305_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx)
+{
+  uint64_t *acc = ctx;
+  uint8_t *ks = key + 16U;
+  uint64_t f0 = acc[0U];
+  uint64_t f13 = acc[1U];
+  uint64_t f23 = acc[2U];
+  uint64_t f33 = acc[3U];
+  uint64_t f40 = acc[4U];
+  uint64_t l0 = f0 + 0ULL;
+  uint64_t tmp00 = l0 & 0x3ffffffULL;
+  uint64_t c00 = l0 >> 26U;
+  uint64_t l1 = f13 + c00;
+  uint64_t tmp10 = l1 & 0x3ffffffULL;
+  uint64_t c10 = l1 >> 26U;
+  uint64_t l2 = f23 + c10;
+  uint64_t tmp20 = l2 & 0x3ffffffULL;
+  uint64_t c20 = l2 >> 26U;
+  uint64_t l3 = f33 + c20;
+  uint64_t tmp30 = l3 & 0x3ffffffULL;
+  uint64_t c30 = l3 >> 26U;
+  uint64_t l4 = f40 + c30;
+  uint64_t tmp40 = l4 & 0x3ffffffULL;
+  uint64_t c40 = l4 >> 26U;
+  uint64_t f010 = tmp00 + c40 * 5ULL;
+  uint64_t f110 = tmp10;
+  uint64_t f210 = tmp20;
+  uint64_t f310 = tmp30;
+  uint64_t f410 = tmp40;
+  uint64_t l = f010 + 0ULL;
+  uint64_t tmp0 = l & 0x3ffffffULL;
+  uint64_t c0 = l >> 26U;
+  uint64_t l5 = f110 + c0;
+  uint64_t tmp1 = l5 & 0x3ffffffULL;
+  uint64_t c1 = l5 >> 26U;
+  uint64_t l6 = f210 + c1;
+  uint64_t tmp2 = l6 & 0x3ffffffULL;
+  uint64_t c2 = l6 >> 26U;
+  uint64_t l7 = f310 + c2;
+  uint64_t tmp3 = l7 & 0x3ffffffULL;
+  uint64_t c3 = l7 >> 26U;
+  uint64_t l8 = f410 + c3;
+  uint64_t tmp4 = l8 & 0x3ffffffULL;
+  uint64_t c4 = l8 >> 26U;
+  uint64_t f02 = tmp0 + c4 * 5ULL;
+  uint64_t f12 = tmp1;
+  uint64_t f22 = tmp2;
+  uint64_t f32 = tmp3;
+  uint64_t f42 = tmp4;
+  uint64_t mh = 0x3ffffffULL;
+  uint64_t ml = 0x3fffffbULL;
+  uint64_t mask = FStar_UInt64_eq_mask(f42, mh);
+  uint64_t mask1 = mask & FStar_UInt64_eq_mask(f32, mh);
+  uint64_t mask2 = mask1 & FStar_UInt64_eq_mask(f22, mh);
+  uint64_t mask3 = mask2 & FStar_UInt64_eq_mask(f12, mh);
+  uint64_t mask4 = mask3 & ~~FStar_UInt64_gte_mask(f02, ml);
+  uint64_t ph = mask4 & mh;
+  uint64_t pl = mask4 & ml;
+  uint64_t o0 = f02 - pl;
+  uint64_t o1 = f12 - ph;
+  uint64_t o2 = f22 - ph;
+  uint64_t o3 = f32 - ph;
+  uint64_t o4 = f42 - ph;
+  uint64_t f011 = o0;
+  uint64_t f111 = o1;
+  uint64_t f211 = o2;
+  uint64_t f311 = o3;
+  uint64_t f411 = o4;
+  acc[0U] = f011;
+  acc[1U] = f111;
+  acc[2U] = f211;
+  acc[3U] = f311;
+  acc[4U] = f411;
+  uint64_t f00 = acc[0U];
+  uint64_t f1 = acc[1U];
+  uint64_t f2 = acc[2U];
+  uint64_t f3 = acc[3U];
+  uint64_t f4 = acc[4U];
+  uint64_t f01 = f00;
+  uint64_t f112 = f1;
+  uint64_t f212 = f2;
+  uint64_t f312 = f3;
+  uint64_t f41 = f4;
+  uint64_t lo = (f01 | f112 << 26U) | f212 << 52U;
+  uint64_t hi = (f212 >> 12U | f312 << 14U) | f41 << 40U;
+  uint64_t f10 = lo;
+  uint64_t f11 = hi;
+  uint64_t u0 = load64_le(ks);
+  uint64_t lo0 = u0;
+  uint64_t u = load64_le(ks + 8U);
+  uint64_t hi0 = u;
+  uint64_t f20 = lo0;
+  uint64_t f21 = hi0;
+  uint64_t r0 = f10 + f20;
+  uint64_t r1 = f11 + f21;
+  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> 63U;
+  uint64_t r11 = r1 + c;
+  uint64_t f30 = r0;
+  uint64_t f31 = r11;
+  store64_le(tag, f30);
+  store64_le(tag + 8U, f31);
+}
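+
+/*
+ * poly1305_finish completes the carries that poly1305_update left pending,
+ * conditionally subtracts p = 2^130 - 5 using the constant-time masks above,
+ * packs the five 26-bit limbs back into a 128-bit value and adds the second
+ * half of the key (the s part) with a branchless carry to produce the tag.
+ */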
+
+Hacl_MAC_Poly1305_state_t *Hacl_MAC_Poly1305_malloc(uint8_t *key)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(16U, sizeof (uint8_t));
+  uint64_t *r1 = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
+  uint64_t *block_state = r1;
+  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  memcpy(k_, key, 32U * sizeof (uint8_t));
+  uint8_t *k_0 = k_;
+  Hacl_MAC_Poly1305_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_0 };
+  Hacl_MAC_Poly1305_state_t
+  *p = (Hacl_MAC_Poly1305_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_MAC_Poly1305_state_t));
+  p[0U] = s;
+  Hacl_MAC_Poly1305_poly1305_init(block_state, key);
+  return p;
+}
+
+void Hacl_MAC_Poly1305_reset(Hacl_MAC_Poly1305_state_t *state, uint8_t *key)
+{
+  Hacl_MAC_Poly1305_state_t scrut = *state;
+  uint8_t *k_ = scrut.p_key;
+  uint8_t *buf = scrut.buf;
+  uint64_t *block_state = scrut.block_state;
+  Hacl_MAC_Poly1305_poly1305_init(block_state, key);
+  memcpy(k_, key, 32U * sizeof (uint8_t));
+  uint8_t *k_1 = k_;
+  Hacl_MAC_Poly1305_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 };
+  state[0U] = tmp;
+}
+
+/**
+Returns Hacl_Streaming_Types_Success, or Hacl_Streaming_Types_MaximumLengthExceeded if the
+total input length would exceed 2^32 - 1 bytes (see the usage sketch after this file's diff).
+*/
+Hacl_Streaming_Types_error_code
+Hacl_MAC_Poly1305_update(Hacl_MAC_Poly1305_state_t *state, uint8_t *chunk, uint32_t chunk_len)
+{
+  Hacl_MAC_Poly1305_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)16U == 0ULL && total_len > 0ULL)
+  {
+    sz = 16U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)16U);
+  }
+  if (chunk_len <= 16U - sz)
+  {
+    Hacl_MAC_Poly1305_state_t s1 = *state;
+    uint64_t *block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 16U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)16U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2,
+          .p_key = k_1
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_MAC_Poly1305_state_t s1 = *state;
+    uint64_t *block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 16U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)16U);
+    }
+    if (!(sz1 == 0U))
+    {
+      poly1305_update(block_state1, 16U, buf);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)16U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 16U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)16U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 16U;
+    uint32_t data1_len = n_blocks * 16U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    poly1305_update(block_state1, data1_len, data1);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len,
+          .p_key = k_1
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 16U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_MAC_Poly1305_state_t s1 = *state;
+    uint64_t *block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)16U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 16U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)16U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2,
+          .p_key = k_1
+        }
+      );
+    Hacl_MAC_Poly1305_state_t s10 = *state;
+    uint64_t *block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint8_t *k_10 = s10.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 16U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)16U);
+    }
+    if (!(sz1 == 0U))
+    {
+      poly1305_update(block_state1, 16U, buf);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)16U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 16U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)16U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 16U;
+    uint32_t data1_len = n_blocks * 16U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    poly1305_update(block_state1, data1_len, data1);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff),
+          .p_key = k_10
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
+
+void Hacl_MAC_Poly1305_digest(Hacl_MAC_Poly1305_state_t *state, uint8_t *output)
+{
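+  /* Finalization runs on a copy of the block state, so the streaming state
+     stays valid and further update calls may follow this digest. */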
+  Hacl_MAC_Poly1305_state_t scrut = *state;
+  uint64_t *block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint8_t *k_ = scrut.p_key;
+  uint32_t r;
+  if (total_len % (uint64_t)16U == 0ULL && total_len > 0ULL)
+  {
+    r = 16U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)16U);
+  }
+  uint8_t *buf_1 = buf_;
+  uint64_t r1[25U] = { 0U };
+  uint64_t *tmp_block_state = r1;
+  memcpy(tmp_block_state, block_state, 25U * sizeof (uint64_t));
+  uint32_t ite;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite = 16U;
+  }
+  else
+  {
+    ite = r % 16U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite;
+  uint8_t *buf_multi = buf_1;
+  poly1305_update(tmp_block_state, 0U, buf_multi);
+  poly1305_update(tmp_block_state, r, buf_last);
+  uint64_t tmp[25U] = { 0U };
+  memcpy(tmp, tmp_block_state, 25U * sizeof (uint64_t));
+  Hacl_MAC_Poly1305_poly1305_finish(output, k_, tmp);
+}
+
+void Hacl_MAC_Poly1305_free(Hacl_MAC_Poly1305_state_t *state)
+{
+  Hacl_MAC_Poly1305_state_t scrut = *state;
+  uint8_t *k_ = scrut.p_key;
+  uint8_t *buf = scrut.buf;
+  uint64_t *block_state = scrut.block_state;
+  KRML_HOST_FREE(k_);
+  KRML_HOST_FREE(block_state);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
+
+void Hacl_MAC_Poly1305_mac(uint8_t *output, uint8_t *input, uint32_t input_len, uint8_t *key)
+{
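+  /* One-shot convenience wrapper: initialize a stack context, absorb the
+     whole input, then finish with the same key. */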
+  uint64_t ctx[25U] = { 0U };
+  Hacl_MAC_Poly1305_poly1305_init(ctx, key);
+  poly1305_update(ctx, input_len, input);
+  Hacl_MAC_Poly1305_poly1305_finish(output, key, ctx);
+}
+
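For reference, here is a minimal caller-side sketch of the streaming and one-shot
Hacl_MAC_Poly1305 API defined in the file above. It is illustrative only and not part of the
generated sources: it assumes the public header Hacl_MAC_Poly1305.h introduced by this patch,
a 32-byte one-time key, and a 16-byte output tag; poly1305_demo and the placeholder
key/message are hypothetical.

  #include <stdint.h>
  #include <string.h>
  #include "Hacl_MAC_Poly1305.h"   /* public header added by this patch */

  int poly1305_demo(void)
  {
    uint8_t key[32U] = { 0U };                     /* placeholder; use a fresh random one-time key */
    uint8_t msg[] = "message to authenticate";     /* placeholder input */
    uint32_t msg_len = (uint32_t)sizeof msg - 1U;  /* drop the trailing NUL */
    uint8_t tag1[16U];
    uint8_t tag2[16U];

    /* One-shot API. */
    Hacl_MAC_Poly1305_mac(tag1, msg, msg_len, key);

    /* Streaming API: the same message fed in two chunks. */
    Hacl_MAC_Poly1305_state_t *st = Hacl_MAC_Poly1305_malloc(key);
    Hacl_Streaming_Types_error_code rc = Hacl_MAC_Poly1305_update(st, msg, 10U);
    if (rc == Hacl_Streaming_Types_Success)
    {
      rc = Hacl_MAC_Poly1305_update(st, msg + 10U, msg_len - 10U);
    }
    Hacl_MAC_Poly1305_digest(st, tag2);            /* state remains usable afterwards */
    Hacl_MAC_Poly1305_free(st);

    /* Both code paths compute the same 16-byte tag. */
    return rc == Hacl_Streaming_Types_Success && memcmp(tag1, tag2, 16U) == 0;
  }

Both paths compute the same tag for the same key and message; Hacl_MAC_Poly1305_update refuses
further input once the running total would exceed 2^32 - 1 bytes and returns
Hacl_Streaming_Types_MaximumLengthExceeded.
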
diff --git a/src/msvc/Hacl_Poly1305_128.c b/src/Hacl_MAC_Poly1305_Simd128.c
similarity index 66%
rename from src/msvc/Hacl_Poly1305_128.c
rename to src/Hacl_MAC_Poly1305_Simd128.c
index f400fe82..17e26978 100644
--- a/src/msvc/Hacl_Poly1305_128.c
+++ b/src/Hacl_MAC_Poly1305_Simd128.c
@@ -23,40 +23,34 @@
  */
 
 
-#include "internal/Hacl_Poly1305_128.h"
+#include "internal/Hacl_MAC_Poly1305_Simd128.h"
 
-void
-Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc, uint8_t *b)
+void Hacl_MAC_Poly1305_Simd128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc, uint8_t *b)
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
   Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(b);
-  Lib_IntVector_Intrinsics_vec128
-  b2 = Lib_IntVector_Intrinsics_vec128_load64_le(b + (uint32_t)16U);
+  Lib_IntVector_Intrinsics_vec128 b2 = Lib_IntVector_Intrinsics_vec128_load64_le(b + 16U);
   Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
   Lib_IntVector_Intrinsics_vec128 hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
   Lib_IntVector_Intrinsics_vec128
   f00 =
     Lib_IntVector_Intrinsics_vec128_and(lo,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f10 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 40U);
   Lib_IntVector_Intrinsics_vec128 f02 = f00;
   Lib_IntVector_Intrinsics_vec128 f12 = f10;
   Lib_IntVector_Intrinsics_vec128 f22 = f20;
@@ -67,7 +61,7 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
   e[2U] = f22;
   e[3U] = f32;
   e[4U] = f42;
-  uint64_t b10 = (uint64_t)0x1000000U;
+  uint64_t b10 = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b10);
   Lib_IntVector_Intrinsics_vec128 f43 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec128_or(f43, mask);
@@ -81,16 +75,11 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
   Lib_IntVector_Intrinsics_vec128 e2 = e[2U];
   Lib_IntVector_Intrinsics_vec128 e3 = e[3U];
   Lib_IntVector_Intrinsics_vec128 e4 = e[4U];
-  Lib_IntVector_Intrinsics_vec128
-  f0 = Lib_IntVector_Intrinsics_vec128_insert64(acc0, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f1 = Lib_IntVector_Intrinsics_vec128_insert64(acc1, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f2 = Lib_IntVector_Intrinsics_vec128_insert64(acc2, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f3 = Lib_IntVector_Intrinsics_vec128_insert64(acc3, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f4 = Lib_IntVector_Intrinsics_vec128_insert64(acc4, (uint64_t)0U, (uint32_t)1U);
+  Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_insert64(acc0, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_insert64(acc1, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f2 = Lib_IntVector_Intrinsics_vec128_insert64(acc2, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f3 = Lib_IntVector_Intrinsics_vec128_insert64(acc3, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f4 = Lib_IntVector_Intrinsics_vec128_insert64(acc4, 0ULL, 1U);
   Lib_IntVector_Intrinsics_vec128 f01 = Lib_IntVector_Intrinsics_vec128_add64(f0, e0);
   Lib_IntVector_Intrinsics_vec128 f11 = Lib_IntVector_Intrinsics_vec128_add64(f1, e1);
   Lib_IntVector_Intrinsics_vec128 f21 = Lib_IntVector_Intrinsics_vec128_add64(f2, e2);
@@ -109,13 +98,13 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
 }
 
 void
-Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
+Hacl_MAC_Poly1305_Simd128_fmul_r2_normalize(
   Lib_IntVector_Intrinsics_vec128 *out,
   Lib_IntVector_Intrinsics_vec128 *p
 )
 {
   Lib_IntVector_Intrinsics_vec128 *r = p;
-  Lib_IntVector_Intrinsics_vec128 *r2 = p + (uint32_t)10U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = p + 10U;
   Lib_IntVector_Intrinsics_vec128 a0 = out[0U];
   Lib_IntVector_Intrinsics_vec128 a1 = out[1U];
   Lib_IntVector_Intrinsics_vec128 a2 = out[2U];
@@ -141,14 +130,10 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   r231 = Lib_IntVector_Intrinsics_vec128_interleave_low64(r23, r13);
   Lib_IntVector_Intrinsics_vec128
   r241 = Lib_IntVector_Intrinsics_vec128_interleave_low64(r24, r14);
-  Lib_IntVector_Intrinsics_vec128
-  r251 = Lib_IntVector_Intrinsics_vec128_smul64(r211, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec128
-  r252 = Lib_IntVector_Intrinsics_vec128_smul64(r221, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec128
-  r253 = Lib_IntVector_Intrinsics_vec128_smul64(r231, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec128
-  r254 = Lib_IntVector_Intrinsics_vec128_smul64(r241, (uint64_t)5U);
+  Lib_IntVector_Intrinsics_vec128 r251 = Lib_IntVector_Intrinsics_vec128_smul64(r211, 5ULL);
+  Lib_IntVector_Intrinsics_vec128 r252 = Lib_IntVector_Intrinsics_vec128_smul64(r221, 5ULL);
+  Lib_IntVector_Intrinsics_vec128 r253 = Lib_IntVector_Intrinsics_vec128_smul64(r231, 5ULL);
+  Lib_IntVector_Intrinsics_vec128 r254 = Lib_IntVector_Intrinsics_vec128_smul64(r241, 5ULL);
   Lib_IntVector_Intrinsics_vec128 a01 = Lib_IntVector_Intrinsics_vec128_mul64(r201, a0);
   Lib_IntVector_Intrinsics_vec128 a11 = Lib_IntVector_Intrinsics_vec128_mul64(r211, a0);
   Lib_IntVector_Intrinsics_vec128 a21 = Lib_IntVector_Intrinsics_vec128_mul64(r221, a0);
@@ -239,37 +224,28 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   Lib_IntVector_Intrinsics_vec128 t2 = a25;
   Lib_IntVector_Intrinsics_vec128 t3 = a35;
   Lib_IntVector_Intrinsics_vec128 t4 = a45;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -302,41 +278,36 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   Lib_IntVector_Intrinsics_vec128
   tmp0 =
     Lib_IntVector_Intrinsics_vec128_and(l,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(o11, c0);
   Lib_IntVector_Intrinsics_vec128
   tmp1 =
     Lib_IntVector_Intrinsics_vec128_and(l0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(o21, c1);
   Lib_IntVector_Intrinsics_vec128
   tmp2 =
     Lib_IntVector_Intrinsics_vec128_and(l1,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(o31, c2);
   Lib_IntVector_Intrinsics_vec128
   tmp3 =
     Lib_IntVector_Intrinsics_vec128_and(l2,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(o41, c3);
   Lib_IntVector_Intrinsics_vec128
   tmp4 =
     Lib_IntVector_Intrinsics_vec128_and(l3,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec128
   o00 =
     Lib_IntVector_Intrinsics_vec128_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec128_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec128 o1 = tmp1;
   Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
   Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
@@ -348,10 +319,11 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   out[4U] = o4;
 }
 
-void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key)
+void
+Hacl_MAC_Poly1305_Simd128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key)
 {
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   uint8_t *kr = key;
   acc[0U] = Lib_IntVector_Intrinsics_vec128_zero;
   acc[1U] = Lib_IntVector_Intrinsics_vec128_zero;
@@ -360,41 +332,38 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   acc[4U] = Lib_IntVector_Intrinsics_vec128_zero;
   uint64_t u0 = load64_le(kr);
   uint64_t lo = u0;
-  uint64_t u = load64_le(kr + (uint32_t)8U);
+  uint64_t u = load64_le(kr + 8U);
   uint64_t hi = u;
-  uint64_t mask0 = (uint64_t)0x0ffffffc0fffffffU;
-  uint64_t mask1 = (uint64_t)0x0ffffffc0ffffffcU;
+  uint64_t mask0 = 0x0ffffffc0fffffffULL;
+  uint64_t mask1 = 0x0ffffffc0ffffffcULL;
   uint64_t lo1 = lo & mask0;
   uint64_t hi1 = hi & mask1;
   Lib_IntVector_Intrinsics_vec128 *r = pre;
-  Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec128 *rn = pre + (uint32_t)10U;
-  Lib_IntVector_Intrinsics_vec128 *rn_5 = pre + (uint32_t)15U;
+  Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
+  Lib_IntVector_Intrinsics_vec128 *rn = pre + 10U;
+  Lib_IntVector_Intrinsics_vec128 *rn_5 = pre + 15U;
   Lib_IntVector_Intrinsics_vec128 r_vec0 = Lib_IntVector_Intrinsics_vec128_load64(lo1);
   Lib_IntVector_Intrinsics_vec128 r_vec1 = Lib_IntVector_Intrinsics_vec128_load64(hi1);
   Lib_IntVector_Intrinsics_vec128
   f00 =
     Lib_IntVector_Intrinsics_vec128_and(r_vec0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f15 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(r_vec1,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, (uint32_t)40U);
+  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, 40U);
   Lib_IntVector_Intrinsics_vec128 f0 = f00;
   Lib_IntVector_Intrinsics_vec128 f1 = f15;
   Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -410,11 +379,11 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   Lib_IntVector_Intrinsics_vec128 f220 = r[2U];
   Lib_IntVector_Intrinsics_vec128 f230 = r[3U];
   Lib_IntVector_Intrinsics_vec128 f240 = r[4U];
-  r5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f200, (uint64_t)5U);
-  r5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f210, (uint64_t)5U);
-  r5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f220, (uint64_t)5U);
-  r5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f230, (uint64_t)5U);
-  r5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f240, (uint64_t)5U);
+  r5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f200, 5ULL);
+  r5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f210, 5ULL);
+  r5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f220, 5ULL);
+  r5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f230, 5ULL);
+  r5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f240, 5ULL);
   Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -511,37 +480,28 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   Lib_IntVector_Intrinsics_vec128 t2 = a24;
   Lib_IntVector_Intrinsics_vec128 t3 = a34;
   Lib_IntVector_Intrinsics_vec128 t4 = a44;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -559,275 +519,56 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   Lib_IntVector_Intrinsics_vec128 f22 = rn[2U];
   Lib_IntVector_Intrinsics_vec128 f23 = rn[3U];
   Lib_IntVector_Intrinsics_vec128 f24 = rn[4U];
-  rn_5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f201, (uint64_t)5U);
-  rn_5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f21, (uint64_t)5U);
-  rn_5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f22, (uint64_t)5U);
-  rn_5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f23, (uint64_t)5U);
-  rn_5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f24, (uint64_t)5U);
-}
-
-void Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *text)
-{
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
-  uint64_t u0 = load64_le(text);
-  uint64_t lo = u0;
-  uint64_t u = load64_le(text + (uint32_t)8U);
-  uint64_t hi = u;
-  Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
-  Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
-  Lib_IntVector_Intrinsics_vec128
-  f010 =
-    Lib_IntVector_Intrinsics_vec128_and(f0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f110 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)52U),
-      Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
-  Lib_IntVector_Intrinsics_vec128
-  f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
-  Lib_IntVector_Intrinsics_vec128 f01 = f010;
-  Lib_IntVector_Intrinsics_vec128 f111 = f110;
-  Lib_IntVector_Intrinsics_vec128 f2 = f20;
-  Lib_IntVector_Intrinsics_vec128 f3 = f30;
-  Lib_IntVector_Intrinsics_vec128 f41 = f40;
-  e[0U] = f01;
-  e[1U] = f111;
-  e[2U] = f2;
-  e[3U] = f3;
-  e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
-  Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
-  Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
-  e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
-  Lib_IntVector_Intrinsics_vec128 *r = pre;
-  Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
-  Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
-  Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
-  Lib_IntVector_Intrinsics_vec128 r3 = r[3U];
-  Lib_IntVector_Intrinsics_vec128 r4 = r[4U];
-  Lib_IntVector_Intrinsics_vec128 r51 = r5[1U];
-  Lib_IntVector_Intrinsics_vec128 r52 = r5[2U];
-  Lib_IntVector_Intrinsics_vec128 r53 = r5[3U];
-  Lib_IntVector_Intrinsics_vec128 r54 = r5[4U];
-  Lib_IntVector_Intrinsics_vec128 f10 = e[0U];
-  Lib_IntVector_Intrinsics_vec128 f11 = e[1U];
-  Lib_IntVector_Intrinsics_vec128 f12 = e[2U];
-  Lib_IntVector_Intrinsics_vec128 f13 = e[3U];
-  Lib_IntVector_Intrinsics_vec128 f14 = e[4U];
-  Lib_IntVector_Intrinsics_vec128 a0 = acc[0U];
-  Lib_IntVector_Intrinsics_vec128 a1 = acc[1U];
-  Lib_IntVector_Intrinsics_vec128 a2 = acc[2U];
-  Lib_IntVector_Intrinsics_vec128 a3 = acc[3U];
-  Lib_IntVector_Intrinsics_vec128 a4 = acc[4U];
-  Lib_IntVector_Intrinsics_vec128 a01 = Lib_IntVector_Intrinsics_vec128_add64(a0, f10);
-  Lib_IntVector_Intrinsics_vec128 a11 = Lib_IntVector_Intrinsics_vec128_add64(a1, f11);
-  Lib_IntVector_Intrinsics_vec128 a21 = Lib_IntVector_Intrinsics_vec128_add64(a2, f12);
-  Lib_IntVector_Intrinsics_vec128 a31 = Lib_IntVector_Intrinsics_vec128_add64(a3, f13);
-  Lib_IntVector_Intrinsics_vec128 a41 = Lib_IntVector_Intrinsics_vec128_add64(a4, f14);
-  Lib_IntVector_Intrinsics_vec128 a02 = Lib_IntVector_Intrinsics_vec128_mul64(r0, a01);
-  Lib_IntVector_Intrinsics_vec128 a12 = Lib_IntVector_Intrinsics_vec128_mul64(r1, a01);
-  Lib_IntVector_Intrinsics_vec128 a22 = Lib_IntVector_Intrinsics_vec128_mul64(r2, a01);
-  Lib_IntVector_Intrinsics_vec128 a32 = Lib_IntVector_Intrinsics_vec128_mul64(r3, a01);
-  Lib_IntVector_Intrinsics_vec128 a42 = Lib_IntVector_Intrinsics_vec128_mul64(r4, a01);
-  Lib_IntVector_Intrinsics_vec128
-  a03 =
-    Lib_IntVector_Intrinsics_vec128_add64(a02,
-      Lib_IntVector_Intrinsics_vec128_mul64(r54, a11));
-  Lib_IntVector_Intrinsics_vec128
-  a13 =
-    Lib_IntVector_Intrinsics_vec128_add64(a12,
-      Lib_IntVector_Intrinsics_vec128_mul64(r0, a11));
-  Lib_IntVector_Intrinsics_vec128
-  a23 =
-    Lib_IntVector_Intrinsics_vec128_add64(a22,
-      Lib_IntVector_Intrinsics_vec128_mul64(r1, a11));
-  Lib_IntVector_Intrinsics_vec128
-  a33 =
-    Lib_IntVector_Intrinsics_vec128_add64(a32,
-      Lib_IntVector_Intrinsics_vec128_mul64(r2, a11));
-  Lib_IntVector_Intrinsics_vec128
-  a43 =
-    Lib_IntVector_Intrinsics_vec128_add64(a42,
-      Lib_IntVector_Intrinsics_vec128_mul64(r3, a11));
-  Lib_IntVector_Intrinsics_vec128
-  a04 =
-    Lib_IntVector_Intrinsics_vec128_add64(a03,
-      Lib_IntVector_Intrinsics_vec128_mul64(r53, a21));
-  Lib_IntVector_Intrinsics_vec128
-  a14 =
-    Lib_IntVector_Intrinsics_vec128_add64(a13,
-      Lib_IntVector_Intrinsics_vec128_mul64(r54, a21));
-  Lib_IntVector_Intrinsics_vec128
-  a24 =
-    Lib_IntVector_Intrinsics_vec128_add64(a23,
-      Lib_IntVector_Intrinsics_vec128_mul64(r0, a21));
-  Lib_IntVector_Intrinsics_vec128
-  a34 =
-    Lib_IntVector_Intrinsics_vec128_add64(a33,
-      Lib_IntVector_Intrinsics_vec128_mul64(r1, a21));
-  Lib_IntVector_Intrinsics_vec128
-  a44 =
-    Lib_IntVector_Intrinsics_vec128_add64(a43,
-      Lib_IntVector_Intrinsics_vec128_mul64(r2, a21));
-  Lib_IntVector_Intrinsics_vec128
-  a05 =
-    Lib_IntVector_Intrinsics_vec128_add64(a04,
-      Lib_IntVector_Intrinsics_vec128_mul64(r52, a31));
-  Lib_IntVector_Intrinsics_vec128
-  a15 =
-    Lib_IntVector_Intrinsics_vec128_add64(a14,
-      Lib_IntVector_Intrinsics_vec128_mul64(r53, a31));
-  Lib_IntVector_Intrinsics_vec128
-  a25 =
-    Lib_IntVector_Intrinsics_vec128_add64(a24,
-      Lib_IntVector_Intrinsics_vec128_mul64(r54, a31));
-  Lib_IntVector_Intrinsics_vec128
-  a35 =
-    Lib_IntVector_Intrinsics_vec128_add64(a34,
-      Lib_IntVector_Intrinsics_vec128_mul64(r0, a31));
-  Lib_IntVector_Intrinsics_vec128
-  a45 =
-    Lib_IntVector_Intrinsics_vec128_add64(a44,
-      Lib_IntVector_Intrinsics_vec128_mul64(r1, a31));
-  Lib_IntVector_Intrinsics_vec128
-  a06 =
-    Lib_IntVector_Intrinsics_vec128_add64(a05,
-      Lib_IntVector_Intrinsics_vec128_mul64(r51, a41));
-  Lib_IntVector_Intrinsics_vec128
-  a16 =
-    Lib_IntVector_Intrinsics_vec128_add64(a15,
-      Lib_IntVector_Intrinsics_vec128_mul64(r52, a41));
-  Lib_IntVector_Intrinsics_vec128
-  a26 =
-    Lib_IntVector_Intrinsics_vec128_add64(a25,
-      Lib_IntVector_Intrinsics_vec128_mul64(r53, a41));
-  Lib_IntVector_Intrinsics_vec128
-  a36 =
-    Lib_IntVector_Intrinsics_vec128_add64(a35,
-      Lib_IntVector_Intrinsics_vec128_mul64(r54, a41));
-  Lib_IntVector_Intrinsics_vec128
-  a46 =
-    Lib_IntVector_Intrinsics_vec128_add64(a45,
-      Lib_IntVector_Intrinsics_vec128_mul64(r0, a41));
-  Lib_IntVector_Intrinsics_vec128 t0 = a06;
-  Lib_IntVector_Intrinsics_vec128 t1 = a16;
-  Lib_IntVector_Intrinsics_vec128 t2 = a26;
-  Lib_IntVector_Intrinsics_vec128 t3 = a36;
-  Lib_IntVector_Intrinsics_vec128 t4 = a46;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
-  Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
-  Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
-  Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
-  Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
-  Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
-  Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
-  Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
-  Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
-  Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
-  Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
-  Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
-  Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
-  Lib_IntVector_Intrinsics_vec128 o0 = x02;
-  Lib_IntVector_Intrinsics_vec128 o1 = x12;
-  Lib_IntVector_Intrinsics_vec128 o2 = x21;
-  Lib_IntVector_Intrinsics_vec128 o3 = x32;
-  Lib_IntVector_Intrinsics_vec128 o4 = x42;
-  acc[0U] = o0;
-  acc[1U] = o1;
-  acc[2U] = o2;
-  acc[3U] = o3;
-  acc[4U] = o4;
+  rn_5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f201, 5ULL);
+  rn_5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f21, 5ULL);
+  rn_5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f22, 5ULL);
+  rn_5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f23, 5ULL);
+  rn_5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f24, 5ULL);
 }
 
-void
-Hacl_Poly1305_128_poly1305_update(
-  Lib_IntVector_Intrinsics_vec128 *ctx,
-  uint32_t len,
-  uint8_t *text
-)
+static void poly1305_update(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t *text)
 {
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  uint32_t sz_block = (uint32_t)32U;
+  uint32_t sz_block = 32U;
   uint32_t len0 = len / sz_block * sz_block;
   uint8_t *t0 = text;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)32U;
+    uint32_t bs = 32U;
     uint8_t *text0 = t0;
-    Hacl_Impl_Poly1305_Field32xN_128_load_acc2(acc, text0);
+    Hacl_MAC_Poly1305_Simd128_load_acc2(acc, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t0 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
       Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(block);
-      Lib_IntVector_Intrinsics_vec128
-      b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + (uint32_t)16U);
+      Lib_IntVector_Intrinsics_vec128 b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + 16U);
       Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       f00 =
         Lib_IntVector_Intrinsics_vec128_and(lo,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f15 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)26U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 26U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f25 =
-        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)52U),
+        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 52U),
           Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi,
-              Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-            (uint32_t)12U));
+              Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+            12U));
       Lib_IntVector_Intrinsics_vec128
       f30 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi,
-            (uint32_t)14U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-      Lib_IntVector_Intrinsics_vec128
-      f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U);
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 14U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+      Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 40U);
       Lib_IntVector_Intrinsics_vec128 f0 = f00;
       Lib_IntVector_Intrinsics_vec128 f1 = f15;
       Lib_IntVector_Intrinsics_vec128 f2 = f25;
@@ -838,12 +579,12 @@ Hacl_Poly1305_128_poly1305_update(
       e[2U] = f2;
       e[3U] = f3;
       e[4U] = f41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
       Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec128 *rn = pre + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec128 *rn5 = pre + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec128 *rn = pre + 10U;
+      Lib_IntVector_Intrinsics_vec128 *rn5 = pre + 15U;
       Lib_IntVector_Intrinsics_vec128 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec128 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec128 r2 = rn[2U];
@@ -948,37 +689,28 @@ Hacl_Poly1305_128_poly1305_update(
       Lib_IntVector_Intrinsics_vec128 t2 = a24;
       Lib_IntVector_Intrinsics_vec128 t3 = a34;
       Lib_IntVector_Intrinsics_vec128 t4 = a44;
-      Lib_IntVector_Intrinsics_vec128
-      mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec128
-      z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec128
-      z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
       Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec128
-      z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec128
-      z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec128 o00 = x02;
@@ -1012,45 +744,41 @@ Hacl_Poly1305_128_poly1305_update(
       acc[3U] = o3;
       acc[4U] = o4;
     }
-    Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(acc, pre);
+    Hacl_MAC_Poly1305_Simd128_fmul_r2_normalize(acc, pre);
   }
   uint32_t len1 = len - len0;
   uint8_t *t1 = text + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t1 + i * (uint32_t)16U;
+    uint8_t *block = t1 + i * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -1061,12 +789,12 @@ Hacl_Poly1305_128_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
     Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
     Lib_IntVector_Intrinsics_vec128 *r = pre;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -1181,37 +909,28 @@ Hacl_Poly1305_128_poly1305_update(
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -1225,41 +944,37 @@ Hacl_Poly1305_128_poly1305_update(
     acc[3U] = o3;
     acc[4U] = o4;
   }
-  if (rem > (uint32_t)0U)
+  if (rem > 0U)
   {
-    uint8_t *last = t1 + nb * (uint32_t)16U;
+    uint8_t *last = t1 + nb * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -1270,12 +985,12 @@ Hacl_Poly1305_128_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem * 8U % 26U;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
-    Lib_IntVector_Intrinsics_vec128 fi = e[rem * (uint32_t)8U / (uint32_t)26U];
-    e[rem * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec128 fi = e[rem * 8U / 26U];
+    e[rem * 8U / 26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
     Lib_IntVector_Intrinsics_vec128 *r = pre;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -1390,37 +1105,28 @@ Hacl_Poly1305_128_poly1305_update(
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -1438,14 +1144,14 @@ Hacl_Poly1305_128_poly1305_update(
 }
 
 void
-Hacl_Poly1305_128_poly1305_finish(
+Hacl_MAC_Poly1305_Simd128_poly1305_finish(
   uint8_t *tag,
   uint8_t *key,
   Lib_IntVector_Intrinsics_vec128 *ctx
 )
 {
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  uint8_t *ks = key + (uint32_t)16U;
+  uint8_t *ks = key + 16U;
   Lib_IntVector_Intrinsics_vec128 f0 = acc[0U];
   Lib_IntVector_Intrinsics_vec128 f13 = acc[1U];
   Lib_IntVector_Intrinsics_vec128 f23 = acc[2U];
@@ -1456,41 +1162,36 @@ Hacl_Poly1305_128_poly1305_finish(
   Lib_IntVector_Intrinsics_vec128
   tmp00 =
     Lib_IntVector_Intrinsics_vec128_and(l0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c00 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c00 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(f13, c00);
   Lib_IntVector_Intrinsics_vec128
   tmp10 =
     Lib_IntVector_Intrinsics_vec128_and(l1,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c10 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c10 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(f23, c10);
   Lib_IntVector_Intrinsics_vec128
   tmp20 =
     Lib_IntVector_Intrinsics_vec128_and(l2,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c20 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c20 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(f33, c20);
   Lib_IntVector_Intrinsics_vec128
   tmp30 =
     Lib_IntVector_Intrinsics_vec128_and(l3,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c30 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c30 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec128 l4 = Lib_IntVector_Intrinsics_vec128_add64(f40, c30);
   Lib_IntVector_Intrinsics_vec128
   tmp40 =
     Lib_IntVector_Intrinsics_vec128_and(l4,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c40 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c40 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, 26U);
   Lib_IntVector_Intrinsics_vec128
   f010 =
     Lib_IntVector_Intrinsics_vec128_add64(tmp00,
-      Lib_IntVector_Intrinsics_vec128_smul64(c40, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec128_smul64(c40, 5ULL));
   Lib_IntVector_Intrinsics_vec128 f110 = tmp10;
   Lib_IntVector_Intrinsics_vec128 f210 = tmp20;
   Lib_IntVector_Intrinsics_vec128 f310 = tmp30;
@@ -1500,49 +1201,42 @@ Hacl_Poly1305_128_poly1305_finish(
   Lib_IntVector_Intrinsics_vec128
   tmp0 =
     Lib_IntVector_Intrinsics_vec128_and(l,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec128 l5 = Lib_IntVector_Intrinsics_vec128_add64(f110, c0);
   Lib_IntVector_Intrinsics_vec128
   tmp1 =
     Lib_IntVector_Intrinsics_vec128_and(l5,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l5, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l5, 26U);
   Lib_IntVector_Intrinsics_vec128 l6 = Lib_IntVector_Intrinsics_vec128_add64(f210, c1);
   Lib_IntVector_Intrinsics_vec128
   tmp2 =
     Lib_IntVector_Intrinsics_vec128_and(l6,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l6, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l6, 26U);
   Lib_IntVector_Intrinsics_vec128 l7 = Lib_IntVector_Intrinsics_vec128_add64(f310, c2);
   Lib_IntVector_Intrinsics_vec128
   tmp3 =
     Lib_IntVector_Intrinsics_vec128_and(l7,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l7, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l7, 26U);
   Lib_IntVector_Intrinsics_vec128 l8 = Lib_IntVector_Intrinsics_vec128_add64(f410, c3);
   Lib_IntVector_Intrinsics_vec128
   tmp4 =
     Lib_IntVector_Intrinsics_vec128_and(l8,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l8, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l8, 26U);
   Lib_IntVector_Intrinsics_vec128
   f02 =
     Lib_IntVector_Intrinsics_vec128_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec128_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec128 f12 = tmp1;
   Lib_IntVector_Intrinsics_vec128 f22 = tmp2;
   Lib_IntVector_Intrinsics_vec128 f32 = tmp3;
   Lib_IntVector_Intrinsics_vec128 f42 = tmp4;
-  Lib_IntVector_Intrinsics_vec128
-  mh = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  ml = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffffbU);
+  Lib_IntVector_Intrinsics_vec128 mh = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 ml = Lib_IntVector_Intrinsics_vec128_load64(0x3fffffbULL);
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_eq64(f42, mh);
   Lib_IntVector_Intrinsics_vec128
   mask1 =
@@ -1582,36 +1276,334 @@ Hacl_Poly1305_128_poly1305_finish(
   Lib_IntVector_Intrinsics_vec128 f2 = acc[2U];
   Lib_IntVector_Intrinsics_vec128 f3 = acc[3U];
   Lib_IntVector_Intrinsics_vec128 f4 = acc[4U];
-  uint64_t f01 = Lib_IntVector_Intrinsics_vec128_extract64(f00, (uint32_t)0U);
-  uint64_t f112 = Lib_IntVector_Intrinsics_vec128_extract64(f1, (uint32_t)0U);
-  uint64_t f212 = Lib_IntVector_Intrinsics_vec128_extract64(f2, (uint32_t)0U);
-  uint64_t f312 = Lib_IntVector_Intrinsics_vec128_extract64(f3, (uint32_t)0U);
-  uint64_t f41 = Lib_IntVector_Intrinsics_vec128_extract64(f4, (uint32_t)0U);
-  uint64_t lo = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U;
-  uint64_t hi = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U;
+  uint64_t f01 = Lib_IntVector_Intrinsics_vec128_extract64(f00, 0U);
+  uint64_t f112 = Lib_IntVector_Intrinsics_vec128_extract64(f1, 0U);
+  uint64_t f212 = Lib_IntVector_Intrinsics_vec128_extract64(f2, 0U);
+  uint64_t f312 = Lib_IntVector_Intrinsics_vec128_extract64(f3, 0U);
+  uint64_t f41 = Lib_IntVector_Intrinsics_vec128_extract64(f4, 0U);
+  uint64_t lo = (f01 | f112 << 26U) | f212 << 52U;
+  uint64_t hi = (f212 >> 12U | f312 << 14U) | f41 << 40U;
   uint64_t f10 = lo;
   uint64_t f11 = hi;
   uint64_t u0 = load64_le(ks);
   uint64_t lo0 = u0;
-  uint64_t u = load64_le(ks + (uint32_t)8U);
+  uint64_t u = load64_le(ks + 8U);
   uint64_t hi0 = u;
   uint64_t f20 = lo0;
   uint64_t f21 = hi0;
   uint64_t r0 = f10 + f20;
   uint64_t r1 = f11 + f21;
-  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U;
+  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> 63U;
   uint64_t r11 = r1 + c;
   uint64_t f30 = r0;
   uint64_t f31 = r11;
   store64_le(tag, f30);
-  store64_le(tag + (uint32_t)8U, f31);
+  store64_le(tag + 8U, f31);
+}
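The carry chains in the functions above all follow one pattern: shift a limb right by 26 to peel off the carry, mask with 0x3ffffff to keep a 26-bit limb, and fold the carry coming out of the top limb back into the bottom one multiplied by 5, since 2^130 is congruent to 5 modulo 2^130 - 5. A minimal scalar sketch of that pattern, assuming nothing beyond standard C (carry_reduce_26 below is illustrative and does not appear in the patch; the vectorized code interleaves its carries in a different order but performs the same operations):

#include <stdint.h>

/* Illustrative only: one carry pass over five radix-2^26 limbs, reducing
   modulo 2^130 - 5.  The vector code above applies the same mask / shift /
   multiply-by-5 steps to several field elements per lane. */
static void carry_reduce_26(uint64_t f[5])
{
  uint64_t mask26 = 0x3ffffffULL;
  for (int i = 0; i < 4; i++)
  {
    uint64_t c = f[i] >> 26U;   /* carry out of limb i         */
    f[i] &= mask26;             /* keep the low 26 bits        */
    f[i + 1] += c;              /* fold carry into limb i + 1  */
  }
  uint64_t c = f[4] >> 26U;     /* carry out of the top limb   */
  f[4] &= mask26;
  f[0] += c * 5ULL;             /* 2^130 == 5 (mod 2^130 - 5)  */
}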
+
+Hacl_MAC_Poly1305_Simd128_state_t *Hacl_MAC_Poly1305_Simd128_malloc(uint8_t *key)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  Lib_IntVector_Intrinsics_vec128
+  *r1 =
+    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 25U);
+  memset(r1, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Lib_IntVector_Intrinsics_vec128 *block_state = r1;
+  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  memcpy(k_, key, 32U * sizeof (uint8_t));
+  uint8_t *k_0 = k_;
+  Hacl_MAC_Poly1305_Simd128_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_0 };
+  Hacl_MAC_Poly1305_Simd128_state_t
+  *p =
+    (Hacl_MAC_Poly1305_Simd128_state_t *)KRML_HOST_MALLOC(sizeof (
+        Hacl_MAC_Poly1305_Simd128_state_t
+      ));
+  p[0U] = s;
+  Hacl_MAC_Poly1305_Simd128_poly1305_init(block_state, key);
+  return p;
+}
+
+void Hacl_MAC_Poly1305_Simd128_reset(Hacl_MAC_Poly1305_Simd128_state_t *state, uint8_t *key)
+{
+  Hacl_MAC_Poly1305_Simd128_state_t scrut = *state;
+  uint8_t *k_ = scrut.p_key;
+  uint8_t *buf = scrut.buf;
+  Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
+  Hacl_MAC_Poly1305_Simd128_poly1305_init(block_state, key);
+  memcpy(k_, key, 32U * sizeof (uint8_t));
+  uint8_t *k_1 = k_;
+  Hacl_MAC_Poly1305_Simd128_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 };
+  state[0U] = tmp;
 }
 
-void Hacl_Poly1305_128_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key)
+/**
+Returns 0 on success, or 1 if the maximum total input length would be exceeded.
+*/
+Hacl_Streaming_Types_error_code
+Hacl_MAC_Poly1305_Simd128_update(
+  Hacl_MAC_Poly1305_Simd128_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
+)
+{
+  Hacl_MAC_Poly1305_Simd128_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)32U == 0ULL && total_len > 0ULL)
+  {
+    sz = 32U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)32U);
+  }
+  if (chunk_len <= 32U - sz)
+  {
+    Hacl_MAC_Poly1305_Simd128_state_t s1 = *state;
+    Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 32U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)32U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd128_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2,
+          .p_key = k_1
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_MAC_Poly1305_Simd128_state_t s1 = *state;
+    Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 32U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)32U);
+    }
+    if (!(sz1 == 0U))
+    {
+      poly1305_update(block_state1, 32U, buf);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)32U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 32U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)32U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 32U;
+    uint32_t data1_len = n_blocks * 32U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    poly1305_update(block_state1, data1_len, data1);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd128_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len,
+          .p_key = k_1
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 32U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_MAC_Poly1305_Simd128_state_t s1 = *state;
+    Lib_IntVector_Intrinsics_vec128 *block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)32U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 32U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)32U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd128_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2,
+          .p_key = k_1
+        }
+      );
+    Hacl_MAC_Poly1305_Simd128_state_t s10 = *state;
+    Lib_IntVector_Intrinsics_vec128 *block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint8_t *k_10 = s10.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 32U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)32U);
+    }
+    if (!(sz1 == 0U))
+    {
+      poly1305_update(block_state1, 32U, buf);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)32U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 32U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)32U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 32U;
+    uint32_t data1_len = n_blocks * 32U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    poly1305_update(block_state1, data1_len, data1);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd128_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff),
+          .p_key = k_10
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
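A worked example of the block splitting in the sz == 0 branch above: with chunk_len = 75 the code computes ite = 75 % 32 = 11, n_blocks = (75 - 11) / 32 = 2, data1_len = 64 and data2_len = 11, so 64 bytes are processed immediately and 11 are buffered. With chunk_len = 64, a non-zero multiple of 32, ite is forced to 32, so only the first 32 bytes are processed and the last full block stays in the internal buffer rather than being hashed right away.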
+
+void
+Hacl_MAC_Poly1305_Simd128_digest(Hacl_MAC_Poly1305_Simd128_state_t *state, uint8_t *output)
+{
+  Hacl_MAC_Poly1305_Simd128_state_t scrut = *state;
+  Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint8_t *k_ = scrut.p_key;
+  uint32_t r;
+  if (total_len % (uint64_t)32U == 0ULL && total_len > 0ULL)
+  {
+    r = 32U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)32U);
+  }
+  uint8_t *buf_1 = buf_;
+  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 r1[25U] KRML_POST_ALIGN(16) = { 0U };
+  Lib_IntVector_Intrinsics_vec128 *tmp_block_state = r1;
+  memcpy(tmp_block_state, block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  uint32_t ite0;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite0 = 16U;
+  }
+  else
+  {
+    ite0 = r % 16U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite0;
+  uint8_t *buf_multi = buf_1;
+  uint32_t ite;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite = 16U;
+  }
+  else
+  {
+    ite = r % 16U;
+  }
+  poly1305_update(tmp_block_state, r - ite, buf_multi);
+  uint32_t ite1;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite1 = 16U;
+  }
+  else
+  {
+    ite1 = r % 16U;
+  }
+  poly1305_update(tmp_block_state, ite1, buf_last);
+  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 tmp[25U] KRML_POST_ALIGN(16) = { 0U };
+  memcpy(tmp, tmp_block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Hacl_MAC_Poly1305_Simd128_poly1305_finish(output, k_, tmp);
+}
+
+void Hacl_MAC_Poly1305_Simd128_free(Hacl_MAC_Poly1305_Simd128_state_t *state)
+{
+  Hacl_MAC_Poly1305_Simd128_state_t scrut = *state;
+  uint8_t *k_ = scrut.p_key;
+  uint8_t *buf = scrut.buf;
+  Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
+  KRML_HOST_FREE(k_);
+  KRML_ALIGNED_FREE(block_state);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
+
+void
+Hacl_MAC_Poly1305_Simd128_mac(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key
+)
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[25U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Poly1305_128_poly1305_init(ctx, key);
-  Hacl_Poly1305_128_poly1305_update(ctx, len, text);
-  Hacl_Poly1305_128_poly1305_finish(tag, key, ctx);
+  Hacl_MAC_Poly1305_Simd128_poly1305_init(ctx, key);
+  poly1305_update(ctx, input_len, input);
+  Hacl_MAC_Poly1305_Simd128_poly1305_finish(output, key, ctx);
 }
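Taken together, the renamed functions above form the public Simd128 Poly1305 MAC interface: a one-shot mac plus a streaming malloc / update / digest / free cycle. A hypothetical caller might look as follows; the header name, the demo function and its parameters are assumptions for illustration, while the 32-byte key and 16-byte tag sizes follow from the code above.

#include <stdint.h>
#include <string.h>
#include "Hacl_MAC_Poly1305_Simd128.h"   /* assumed public header name */

/* Illustrative caller, not part of the patch; error handling is minimal. */
static void poly1305_simd128_demo(uint8_t *msg, uint32_t msg_len, uint8_t key[32])
{
  uint8_t tag1[16U] = { 0U };
  uint8_t tag2[16U] = { 0U };

  /* One-shot interface. */
  Hacl_MAC_Poly1305_Simd128_mac(tag1, msg, msg_len, key);

  /* Streaming interface: feed the same message in two chunks. */
  Hacl_MAC_Poly1305_Simd128_state_t *st = Hacl_MAC_Poly1305_Simd128_malloc(key);
  uint32_t half = msg_len / 2U;
  (void)Hacl_MAC_Poly1305_Simd128_update(st, msg, half);
  (void)Hacl_MAC_Poly1305_Simd128_update(st, msg + half, msg_len - half);
  Hacl_MAC_Poly1305_Simd128_digest(st, tag2);
  Hacl_MAC_Poly1305_Simd128_free(st);

  /* Both paths yield the same 16-byte tag. */
  (void)memcmp(tag1, tag2, 16U);
}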
 
diff --git a/src/msvc/Hacl_Poly1305_256.c b/src/Hacl_MAC_Poly1305_Simd256.c
similarity index 71%
rename from src/msvc/Hacl_Poly1305_256.c
rename to src/Hacl_MAC_Poly1305_Simd256.c
index db28cdc7..f25e8fff 100644
--- a/src/msvc/Hacl_Poly1305_256.c
+++ b/src/Hacl_MAC_Poly1305_Simd256.c
@@ -23,39 +23,30 @@
  */
 
 
-#include "internal/Hacl_Poly1305_256.h"
+#include "internal/Hacl_MAC_Poly1305_Simd256.h"
 
-void
-Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc, uint8_t *b)
+void Hacl_MAC_Poly1305_Simd256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc, uint8_t *b)
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
   Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(b);
-  Lib_IntVector_Intrinsics_vec256
-  hi = Lib_IntVector_Intrinsics_vec256_load64_le(b + (uint32_t)32U);
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
+  Lib_IntVector_Intrinsics_vec256 hi = Lib_IntVector_Intrinsics_vec256_load64_le(b + 32U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
   Lib_IntVector_Intrinsics_vec256 m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
   Lib_IntVector_Intrinsics_vec256
   m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi);
-  Lib_IntVector_Intrinsics_vec256
-  m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U);
-  Lib_IntVector_Intrinsics_vec256
-  m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U);
+  Lib_IntVector_Intrinsics_vec256 m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, 48U);
+  Lib_IntVector_Intrinsics_vec256 m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, 48U);
   Lib_IntVector_Intrinsics_vec256 m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1);
   Lib_IntVector_Intrinsics_vec256 t0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1);
   Lib_IntVector_Intrinsics_vec256 t3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
-  Lib_IntVector_Intrinsics_vec256
-  t2 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)4U);
+  Lib_IntVector_Intrinsics_vec256 t2 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 4U);
   Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t2, mask26);
-  Lib_IntVector_Intrinsics_vec256
-  t1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 t1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
   Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t1, mask26);
   Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
-  Lib_IntVector_Intrinsics_vec256
-  t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)30U);
+  Lib_IntVector_Intrinsics_vec256 t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 30U);
   Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask26);
-  Lib_IntVector_Intrinsics_vec256
-  o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
+  Lib_IntVector_Intrinsics_vec256 o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, 40U);
   Lib_IntVector_Intrinsics_vec256 o0 = o5;
   Lib_IntVector_Intrinsics_vec256 o1 = o10;
   Lib_IntVector_Intrinsics_vec256 o2 = o20;
@@ -66,7 +57,7 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
   e[2U] = o2;
   e[3U] = o3;
   e[4U] = o4;
-  uint64_t b1 = (uint64_t)0x1000000U;
+  uint64_t b1 = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b1);
   Lib_IntVector_Intrinsics_vec256 f40 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec256_or(f40, mask);
@@ -88,28 +79,28 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
   Lib_IntVector_Intrinsics_vec256
   r01 =
     Lib_IntVector_Intrinsics_vec256_insert64(r0,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc0, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc0, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r11 =
     Lib_IntVector_Intrinsics_vec256_insert64(r1,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc1, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc1, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r21 =
     Lib_IntVector_Intrinsics_vec256_insert64(r2,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc2, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc2, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r31 =
     Lib_IntVector_Intrinsics_vec256_insert64(r3,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc3, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc3, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r41 =
     Lib_IntVector_Intrinsics_vec256_insert64(r4,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc4, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc4, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_add64(r01, e0);
   Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_add64(r11, e1);
   Lib_IntVector_Intrinsics_vec256 f2 = Lib_IntVector_Intrinsics_vec256_add64(r21, e2);
@@ -128,14 +119,14 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
 }
 
 void
-Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
+Hacl_MAC_Poly1305_Simd256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 *out,
   Lib_IntVector_Intrinsics_vec256 *p
 )
 {
   Lib_IntVector_Intrinsics_vec256 *r = p;
-  Lib_IntVector_Intrinsics_vec256 *r_5 = p + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec256 *r4 = p + (uint32_t)10U;
+  Lib_IntVector_Intrinsics_vec256 *r_5 = p + 5U;
+  Lib_IntVector_Intrinsics_vec256 *r4 = p + 10U;
   Lib_IntVector_Intrinsics_vec256 a0 = out[0U];
   Lib_IntVector_Intrinsics_vec256 a1 = out[1U];
   Lib_IntVector_Intrinsics_vec256 a2 = out[2U];
@@ -245,37 +236,30 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 t20 = a250;
   Lib_IntVector_Intrinsics_vec256 t30 = a350;
   Lib_IntVector_Intrinsics_vec256 t40 = a450;
-  Lib_IntVector_Intrinsics_vec256
-  mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, 26U);
+  Lib_IntVector_Intrinsics_vec256 z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 26U);
   Lib_IntVector_Intrinsics_vec256 x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask260);
   Lib_IntVector_Intrinsics_vec256 x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask260);
   Lib_IntVector_Intrinsics_vec256 x10 = Lib_IntVector_Intrinsics_vec256_add64(t10, z00);
   Lib_IntVector_Intrinsics_vec256 x40 = Lib_IntVector_Intrinsics_vec256_add64(t40, z10);
-  Lib_IntVector_Intrinsics_vec256
-  z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, 26U);
+  Lib_IntVector_Intrinsics_vec256 z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, 26U);
+  Lib_IntVector_Intrinsics_vec256 t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z110, t5);
   Lib_IntVector_Intrinsics_vec256 x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask260);
   Lib_IntVector_Intrinsics_vec256 x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask260);
   Lib_IntVector_Intrinsics_vec256 x20 = Lib_IntVector_Intrinsics_vec256_add64(t20, z010);
   Lib_IntVector_Intrinsics_vec256 x010 = Lib_IntVector_Intrinsics_vec256_add64(x00, z12);
+  Lib_IntVector_Intrinsics_vec256 z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, 26U);
   Lib_IntVector_Intrinsics_vec256
-  z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, (uint32_t)26U);
+  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, 26U);
   Lib_IntVector_Intrinsics_vec256 x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask260);
   Lib_IntVector_Intrinsics_vec256 x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask260);
   Lib_IntVector_Intrinsics_vec256 x310 = Lib_IntVector_Intrinsics_vec256_add64(x30, z020);
   Lib_IntVector_Intrinsics_vec256 x120 = Lib_IntVector_Intrinsics_vec256_add64(x110, z130);
   Lib_IntVector_Intrinsics_vec256
-  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, (uint32_t)26U);
+  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, 26U);
   Lib_IntVector_Intrinsics_vec256 x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask260);
   Lib_IntVector_Intrinsics_vec256 x420 = Lib_IntVector_Intrinsics_vec256_add64(x410, z030);
   Lib_IntVector_Intrinsics_vec256 r20 = x020;
@@ -373,37 +357,30 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 t21 = a251;
   Lib_IntVector_Intrinsics_vec256 t31 = a351;
   Lib_IntVector_Intrinsics_vec256 t41 = a451;
-  Lib_IntVector_Intrinsics_vec256
-  mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z04 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z14 = Lib_IntVector_Intrinsics_vec256_shift_right64(t31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask261 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z04 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+  Lib_IntVector_Intrinsics_vec256 z14 = Lib_IntVector_Intrinsics_vec256_shift_right64(t31, 26U);
   Lib_IntVector_Intrinsics_vec256 x03 = Lib_IntVector_Intrinsics_vec256_and(t01, mask261);
   Lib_IntVector_Intrinsics_vec256 x33 = Lib_IntVector_Intrinsics_vec256_and(t31, mask261);
   Lib_IntVector_Intrinsics_vec256 x13 = Lib_IntVector_Intrinsics_vec256_add64(t11, z04);
   Lib_IntVector_Intrinsics_vec256 x43 = Lib_IntVector_Intrinsics_vec256_add64(t41, z14);
-  Lib_IntVector_Intrinsics_vec256
-  z011 = Lib_IntVector_Intrinsics_vec256_shift_right64(x13, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z111 = Lib_IntVector_Intrinsics_vec256_shift_right64(x43, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t6 = Lib_IntVector_Intrinsics_vec256_shift_left64(z111, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z011 = Lib_IntVector_Intrinsics_vec256_shift_right64(x13, 26U);
+  Lib_IntVector_Intrinsics_vec256 z111 = Lib_IntVector_Intrinsics_vec256_shift_right64(x43, 26U);
+  Lib_IntVector_Intrinsics_vec256 t6 = Lib_IntVector_Intrinsics_vec256_shift_left64(z111, 2U);
   Lib_IntVector_Intrinsics_vec256 z120 = Lib_IntVector_Intrinsics_vec256_add64(z111, t6);
   Lib_IntVector_Intrinsics_vec256 x111 = Lib_IntVector_Intrinsics_vec256_and(x13, mask261);
   Lib_IntVector_Intrinsics_vec256 x411 = Lib_IntVector_Intrinsics_vec256_and(x43, mask261);
   Lib_IntVector_Intrinsics_vec256 x22 = Lib_IntVector_Intrinsics_vec256_add64(t21, z011);
   Lib_IntVector_Intrinsics_vec256 x011 = Lib_IntVector_Intrinsics_vec256_add64(x03, z120);
+  Lib_IntVector_Intrinsics_vec256 z021 = Lib_IntVector_Intrinsics_vec256_shift_right64(x22, 26U);
   Lib_IntVector_Intrinsics_vec256
-  z021 = Lib_IntVector_Intrinsics_vec256_shift_right64(x22, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z131 = Lib_IntVector_Intrinsics_vec256_shift_right64(x011, (uint32_t)26U);
+  z131 = Lib_IntVector_Intrinsics_vec256_shift_right64(x011, 26U);
   Lib_IntVector_Intrinsics_vec256 x211 = Lib_IntVector_Intrinsics_vec256_and(x22, mask261);
   Lib_IntVector_Intrinsics_vec256 x021 = Lib_IntVector_Intrinsics_vec256_and(x011, mask261);
   Lib_IntVector_Intrinsics_vec256 x311 = Lib_IntVector_Intrinsics_vec256_add64(x33, z021);
   Lib_IntVector_Intrinsics_vec256 x121 = Lib_IntVector_Intrinsics_vec256_add64(x111, z131);
   Lib_IntVector_Intrinsics_vec256
-  z031 = Lib_IntVector_Intrinsics_vec256_shift_right64(x311, (uint32_t)26U);
+  z031 = Lib_IntVector_Intrinsics_vec256_shift_right64(x311, 26U);
   Lib_IntVector_Intrinsics_vec256 x321 = Lib_IntVector_Intrinsics_vec256_and(x311, mask261);
   Lib_IntVector_Intrinsics_vec256 x421 = Lib_IntVector_Intrinsics_vec256_add64(x411, z031);
   Lib_IntVector_Intrinsics_vec256 r30 = x021;
@@ -441,14 +418,10 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   v34344 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r44, r34);
   Lib_IntVector_Intrinsics_vec256
   r12344 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v34344, v12124);
-  Lib_IntVector_Intrinsics_vec256
-  r123451 = Lib_IntVector_Intrinsics_vec256_smul64(r12341, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec256
-  r123452 = Lib_IntVector_Intrinsics_vec256_smul64(r12342, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec256
-  r123453 = Lib_IntVector_Intrinsics_vec256_smul64(r12343, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec256
-  r123454 = Lib_IntVector_Intrinsics_vec256_smul64(r12344, (uint64_t)5U);
+  Lib_IntVector_Intrinsics_vec256 r123451 = Lib_IntVector_Intrinsics_vec256_smul64(r12341, 5ULL);
+  Lib_IntVector_Intrinsics_vec256 r123452 = Lib_IntVector_Intrinsics_vec256_smul64(r12342, 5ULL);
+  Lib_IntVector_Intrinsics_vec256 r123453 = Lib_IntVector_Intrinsics_vec256_smul64(r12343, 5ULL);
+  Lib_IntVector_Intrinsics_vec256 r123454 = Lib_IntVector_Intrinsics_vec256_smul64(r12344, 5ULL);
   Lib_IntVector_Intrinsics_vec256 a01 = Lib_IntVector_Intrinsics_vec256_mul64(r12340, a0);
   Lib_IntVector_Intrinsics_vec256 a11 = Lib_IntVector_Intrinsics_vec256_mul64(r12341, a0);
   Lib_IntVector_Intrinsics_vec256 a21 = Lib_IntVector_Intrinsics_vec256_mul64(r12342, a0);
@@ -539,37 +512,28 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 t2 = a25;
   Lib_IntVector_Intrinsics_vec256 t3 = a35;
   Lib_IntVector_Intrinsics_vec256 t4 = a45;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z121 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z121);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -612,41 +576,36 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256
   tmp0 =
     Lib_IntVector_Intrinsics_vec256_and(l,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec256 l0 = Lib_IntVector_Intrinsics_vec256_add64(v21, c0);
   Lib_IntVector_Intrinsics_vec256
   tmp1 =
     Lib_IntVector_Intrinsics_vec256_and(l0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec256 l1 = Lib_IntVector_Intrinsics_vec256_add64(v22, c1);
   Lib_IntVector_Intrinsics_vec256
   tmp2 =
     Lib_IntVector_Intrinsics_vec256_and(l1,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec256 l2 = Lib_IntVector_Intrinsics_vec256_add64(v23, c2);
   Lib_IntVector_Intrinsics_vec256
   tmp3 =
     Lib_IntVector_Intrinsics_vec256_and(l2,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec256 l3 = Lib_IntVector_Intrinsics_vec256_add64(v24, c3);
   Lib_IntVector_Intrinsics_vec256
   tmp4 =
     Lib_IntVector_Intrinsics_vec256_and(l3,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec256
   o00 =
     Lib_IntVector_Intrinsics_vec256_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec256_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec256_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec256 o1 = tmp1;
   Lib_IntVector_Intrinsics_vec256 o2 = tmp2;
   Lib_IntVector_Intrinsics_vec256 o3 = tmp3;
@@ -658,10 +617,11 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   out[4U] = o4;
 }
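As their names and bodies suggest, Hacl_MAC_Poly1305_Simd256_load_acc4 and Hacl_MAC_Poly1305_Simd256_fmul_r4_normalize appear to be the two ends of a 4-way evaluation of the Poly1305 polynomial: four lane accumulators each advance by r^4 per group of four blocks, and the final normalization weights the lanes by r^4, r^3, r^2 and r before summing. The toy program below is illustrative only and not part of the patch; a small prime stands in for 2^130 - 5 so the identity can be checked with plain 64-bit arithmetic.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define P 1000003ULL  /* small stand-in prime; the real modulus is 2^130 - 5 */

/* Straightforward Horner evaluation: acc = m_1*r^n + m_2*r^(n-1) + ... + m_n*r. */
static uint64_t horner(const uint64_t *m, int n, uint64_t r)
{
  uint64_t acc = 0U;
  for (int i = 0; i < n; i++)
    acc = (acc + m[i]) * r % P;
  return acc;
}

/* 4-way variant: each lane advances by r^4 per group of four blocks, then the
   lanes are weighted by r^4, r^3, r^2, r and summed (the "normalize" step). */
static uint64_t horner4(const uint64_t *m, int n, uint64_t r)
{
  assert(n % 4 == 0);
  uint64_t r2 = r * r % P;
  uint64_t r3 = r2 * r % P;
  uint64_t r4 = r2 * r2 % P;
  uint64_t a[4] = { 0U, 0U, 0U, 0U };
  for (int i = 0; i < n; i += 4)
    for (int j = 0; j < 4; j++)
      a[j] = (a[j] * r4 + m[i + j]) % P;
  return (a[0] * r4 + a[1] * r3 + a[2] * r2 + a[3] * r) % P;
}

int main(void)
{
  uint64_t m[8] = { 11U, 22U, 33U, 44U, 55U, 66U, 77U, 88U };
  uint64_t r = 12345U;
  /* Both evaluations print the same value. */
  printf("%llu %llu\n",
    (unsigned long long)horner(m, 8, r),
    (unsigned long long)horner4(m, 8, r));
  return 0;
}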
 
-void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *key)
+void
+Hacl_MAC_Poly1305_Simd256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *key)
 {
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   uint8_t *kr = key;
   acc[0U] = Lib_IntVector_Intrinsics_vec256_zero;
   acc[1U] = Lib_IntVector_Intrinsics_vec256_zero;
@@ -670,41 +630,38 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   acc[4U] = Lib_IntVector_Intrinsics_vec256_zero;
   uint64_t u0 = load64_le(kr);
   uint64_t lo = u0;
-  uint64_t u = load64_le(kr + (uint32_t)8U);
+  uint64_t u = load64_le(kr + 8U);
   uint64_t hi = u;
-  uint64_t mask0 = (uint64_t)0x0ffffffc0fffffffU;
-  uint64_t mask1 = (uint64_t)0x0ffffffc0ffffffcU;
+  uint64_t mask0 = 0x0ffffffc0fffffffULL;
+  uint64_t mask1 = 0x0ffffffc0ffffffcULL;
   uint64_t lo1 = lo & mask0;
   uint64_t hi1 = hi & mask1;
   Lib_IntVector_Intrinsics_vec256 *r = pre;
-  Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec256 *rn = pre + (uint32_t)10U;
-  Lib_IntVector_Intrinsics_vec256 *rn_5 = pre + (uint32_t)15U;
+  Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
+  Lib_IntVector_Intrinsics_vec256 *rn = pre + 10U;
+  Lib_IntVector_Intrinsics_vec256 *rn_5 = pre + 15U;
   Lib_IntVector_Intrinsics_vec256 r_vec0 = Lib_IntVector_Intrinsics_vec256_load64(lo1);
   Lib_IntVector_Intrinsics_vec256 r_vec1 = Lib_IntVector_Intrinsics_vec256_load64(hi1);
   Lib_IntVector_Intrinsics_vec256
   f00 =
     Lib_IntVector_Intrinsics_vec256_and(r_vec0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f15 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0, 26U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f20 =
-    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0, 52U),
       Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(r_vec1,
-          Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec256
   f30 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, 14U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
-  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, (uint32_t)40U);
+  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, 40U);
   Lib_IntVector_Intrinsics_vec256 f0 = f00;
   Lib_IntVector_Intrinsics_vec256 f1 = f15;
   Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -720,11 +677,11 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 f220 = r[2U];
   Lib_IntVector_Intrinsics_vec256 f230 = r[3U];
   Lib_IntVector_Intrinsics_vec256 f240 = r[4U];
-  r5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f200, (uint64_t)5U);
-  r5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f210, (uint64_t)5U);
-  r5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f220, (uint64_t)5U);
-  r5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f230, (uint64_t)5U);
-  r5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f240, (uint64_t)5U);
+  r5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f200, 5ULL);
+  r5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f210, 5ULL);
+  r5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f220, 5ULL);
+  r5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f230, 5ULL);
+  r5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f240, 5ULL);
   Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec256 r10 = r[1U];
   Lib_IntVector_Intrinsics_vec256 r20 = r[2U];
@@ -829,37 +786,30 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 t20 = a240;
   Lib_IntVector_Intrinsics_vec256 t30 = a340;
   Lib_IntVector_Intrinsics_vec256 t40 = a440;
-  Lib_IntVector_Intrinsics_vec256
-  mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, 26U);
+  Lib_IntVector_Intrinsics_vec256 z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 26U);
   Lib_IntVector_Intrinsics_vec256 x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask260);
   Lib_IntVector_Intrinsics_vec256 x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask260);
   Lib_IntVector_Intrinsics_vec256 x10 = Lib_IntVector_Intrinsics_vec256_add64(t10, z00);
   Lib_IntVector_Intrinsics_vec256 x40 = Lib_IntVector_Intrinsics_vec256_add64(t40, z10);
-  Lib_IntVector_Intrinsics_vec256
-  z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, 26U);
+  Lib_IntVector_Intrinsics_vec256 z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, 26U);
+  Lib_IntVector_Intrinsics_vec256 t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z110, t5);
   Lib_IntVector_Intrinsics_vec256 x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask260);
   Lib_IntVector_Intrinsics_vec256 x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask260);
   Lib_IntVector_Intrinsics_vec256 x20 = Lib_IntVector_Intrinsics_vec256_add64(t20, z010);
   Lib_IntVector_Intrinsics_vec256 x010 = Lib_IntVector_Intrinsics_vec256_add64(x00, z12);
+  Lib_IntVector_Intrinsics_vec256 z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, 26U);
   Lib_IntVector_Intrinsics_vec256
-  z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, (uint32_t)26U);
+  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, 26U);
   Lib_IntVector_Intrinsics_vec256 x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask260);
   Lib_IntVector_Intrinsics_vec256 x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask260);
   Lib_IntVector_Intrinsics_vec256 x310 = Lib_IntVector_Intrinsics_vec256_add64(x30, z020);
   Lib_IntVector_Intrinsics_vec256 x120 = Lib_IntVector_Intrinsics_vec256_add64(x110, z130);
   Lib_IntVector_Intrinsics_vec256
-  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, (uint32_t)26U);
+  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, 26U);
   Lib_IntVector_Intrinsics_vec256 x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask260);
   Lib_IntVector_Intrinsics_vec256 x420 = Lib_IntVector_Intrinsics_vec256_add64(x410, z030);
   Lib_IntVector_Intrinsics_vec256 o00 = x020;
@@ -877,11 +827,11 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 f221 = rn[2U];
   Lib_IntVector_Intrinsics_vec256 f231 = rn[3U];
   Lib_IntVector_Intrinsics_vec256 f241 = rn[4U];
-  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f201, (uint64_t)5U);
-  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f211, (uint64_t)5U);
-  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f221, (uint64_t)5U);
-  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f231, (uint64_t)5U);
-  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f241, (uint64_t)5U);
+  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f201, 5ULL);
+  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f211, 5ULL);
+  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f221, 5ULL);
+  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f231, 5ULL);
+  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f241, 5ULL);
   Lib_IntVector_Intrinsics_vec256 r00 = rn[0U];
   Lib_IntVector_Intrinsics_vec256 r1 = rn[1U];
   Lib_IntVector_Intrinsics_vec256 r2 = rn[2U];
@@ -980,37 +930,28 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 t2 = a24;
   Lib_IntVector_Intrinsics_vec256 t3 = a34;
   Lib_IntVector_Intrinsics_vec256 t4 = a44;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z120 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z120);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1028,277 +969,57 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 f22 = rn[2U];
   Lib_IntVector_Intrinsics_vec256 f23 = rn[3U];
   Lib_IntVector_Intrinsics_vec256 f24 = rn[4U];
-  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f202, (uint64_t)5U);
-  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f21, (uint64_t)5U);
-  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f22, (uint64_t)5U);
-  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f23, (uint64_t)5U);
-  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f24, (uint64_t)5U);
-}
-
-void Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *text)
-{
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
-  uint64_t u0 = load64_le(text);
-  uint64_t lo = u0;
-  uint64_t u = load64_le(text + (uint32_t)8U);
-  uint64_t hi = u;
-  Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
-  Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
-  Lib_IntVector_Intrinsics_vec256
-  f010 =
-    Lib_IntVector_Intrinsics_vec256_and(f0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  f110 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  f20 =
-    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)52U),
-      Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-          Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
-  Lib_IntVector_Intrinsics_vec256
-  f30 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
-  Lib_IntVector_Intrinsics_vec256 f01 = f010;
-  Lib_IntVector_Intrinsics_vec256 f111 = f110;
-  Lib_IntVector_Intrinsics_vec256 f2 = f20;
-  Lib_IntVector_Intrinsics_vec256 f3 = f30;
-  Lib_IntVector_Intrinsics_vec256 f41 = f40;
-  e[0U] = f01;
-  e[1U] = f111;
-  e[2U] = f2;
-  e[3U] = f3;
-  e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
-  Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
-  Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
-  e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
-  Lib_IntVector_Intrinsics_vec256 *r = pre;
-  Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
-  Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
-  Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
-  Lib_IntVector_Intrinsics_vec256 r3 = r[3U];
-  Lib_IntVector_Intrinsics_vec256 r4 = r[4U];
-  Lib_IntVector_Intrinsics_vec256 r51 = r5[1U];
-  Lib_IntVector_Intrinsics_vec256 r52 = r5[2U];
-  Lib_IntVector_Intrinsics_vec256 r53 = r5[3U];
-  Lib_IntVector_Intrinsics_vec256 r54 = r5[4U];
-  Lib_IntVector_Intrinsics_vec256 f10 = e[0U];
-  Lib_IntVector_Intrinsics_vec256 f11 = e[1U];
-  Lib_IntVector_Intrinsics_vec256 f12 = e[2U];
-  Lib_IntVector_Intrinsics_vec256 f13 = e[3U];
-  Lib_IntVector_Intrinsics_vec256 f14 = e[4U];
-  Lib_IntVector_Intrinsics_vec256 a0 = acc[0U];
-  Lib_IntVector_Intrinsics_vec256 a1 = acc[1U];
-  Lib_IntVector_Intrinsics_vec256 a2 = acc[2U];
-  Lib_IntVector_Intrinsics_vec256 a3 = acc[3U];
-  Lib_IntVector_Intrinsics_vec256 a4 = acc[4U];
-  Lib_IntVector_Intrinsics_vec256 a01 = Lib_IntVector_Intrinsics_vec256_add64(a0, f10);
-  Lib_IntVector_Intrinsics_vec256 a11 = Lib_IntVector_Intrinsics_vec256_add64(a1, f11);
-  Lib_IntVector_Intrinsics_vec256 a21 = Lib_IntVector_Intrinsics_vec256_add64(a2, f12);
-  Lib_IntVector_Intrinsics_vec256 a31 = Lib_IntVector_Intrinsics_vec256_add64(a3, f13);
-  Lib_IntVector_Intrinsics_vec256 a41 = Lib_IntVector_Intrinsics_vec256_add64(a4, f14);
-  Lib_IntVector_Intrinsics_vec256 a02 = Lib_IntVector_Intrinsics_vec256_mul64(r0, a01);
-  Lib_IntVector_Intrinsics_vec256 a12 = Lib_IntVector_Intrinsics_vec256_mul64(r1, a01);
-  Lib_IntVector_Intrinsics_vec256 a22 = Lib_IntVector_Intrinsics_vec256_mul64(r2, a01);
-  Lib_IntVector_Intrinsics_vec256 a32 = Lib_IntVector_Intrinsics_vec256_mul64(r3, a01);
-  Lib_IntVector_Intrinsics_vec256 a42 = Lib_IntVector_Intrinsics_vec256_mul64(r4, a01);
-  Lib_IntVector_Intrinsics_vec256
-  a03 =
-    Lib_IntVector_Intrinsics_vec256_add64(a02,
-      Lib_IntVector_Intrinsics_vec256_mul64(r54, a11));
-  Lib_IntVector_Intrinsics_vec256
-  a13 =
-    Lib_IntVector_Intrinsics_vec256_add64(a12,
-      Lib_IntVector_Intrinsics_vec256_mul64(r0, a11));
-  Lib_IntVector_Intrinsics_vec256
-  a23 =
-    Lib_IntVector_Intrinsics_vec256_add64(a22,
-      Lib_IntVector_Intrinsics_vec256_mul64(r1, a11));
-  Lib_IntVector_Intrinsics_vec256
-  a33 =
-    Lib_IntVector_Intrinsics_vec256_add64(a32,
-      Lib_IntVector_Intrinsics_vec256_mul64(r2, a11));
-  Lib_IntVector_Intrinsics_vec256
-  a43 =
-    Lib_IntVector_Intrinsics_vec256_add64(a42,
-      Lib_IntVector_Intrinsics_vec256_mul64(r3, a11));
-  Lib_IntVector_Intrinsics_vec256
-  a04 =
-    Lib_IntVector_Intrinsics_vec256_add64(a03,
-      Lib_IntVector_Intrinsics_vec256_mul64(r53, a21));
-  Lib_IntVector_Intrinsics_vec256
-  a14 =
-    Lib_IntVector_Intrinsics_vec256_add64(a13,
-      Lib_IntVector_Intrinsics_vec256_mul64(r54, a21));
-  Lib_IntVector_Intrinsics_vec256
-  a24 =
-    Lib_IntVector_Intrinsics_vec256_add64(a23,
-      Lib_IntVector_Intrinsics_vec256_mul64(r0, a21));
-  Lib_IntVector_Intrinsics_vec256
-  a34 =
-    Lib_IntVector_Intrinsics_vec256_add64(a33,
-      Lib_IntVector_Intrinsics_vec256_mul64(r1, a21));
-  Lib_IntVector_Intrinsics_vec256
-  a44 =
-    Lib_IntVector_Intrinsics_vec256_add64(a43,
-      Lib_IntVector_Intrinsics_vec256_mul64(r2, a21));
-  Lib_IntVector_Intrinsics_vec256
-  a05 =
-    Lib_IntVector_Intrinsics_vec256_add64(a04,
-      Lib_IntVector_Intrinsics_vec256_mul64(r52, a31));
-  Lib_IntVector_Intrinsics_vec256
-  a15 =
-    Lib_IntVector_Intrinsics_vec256_add64(a14,
-      Lib_IntVector_Intrinsics_vec256_mul64(r53, a31));
-  Lib_IntVector_Intrinsics_vec256
-  a25 =
-    Lib_IntVector_Intrinsics_vec256_add64(a24,
-      Lib_IntVector_Intrinsics_vec256_mul64(r54, a31));
-  Lib_IntVector_Intrinsics_vec256
-  a35 =
-    Lib_IntVector_Intrinsics_vec256_add64(a34,
-      Lib_IntVector_Intrinsics_vec256_mul64(r0, a31));
-  Lib_IntVector_Intrinsics_vec256
-  a45 =
-    Lib_IntVector_Intrinsics_vec256_add64(a44,
-      Lib_IntVector_Intrinsics_vec256_mul64(r1, a31));
-  Lib_IntVector_Intrinsics_vec256
-  a06 =
-    Lib_IntVector_Intrinsics_vec256_add64(a05,
-      Lib_IntVector_Intrinsics_vec256_mul64(r51, a41));
-  Lib_IntVector_Intrinsics_vec256
-  a16 =
-    Lib_IntVector_Intrinsics_vec256_add64(a15,
-      Lib_IntVector_Intrinsics_vec256_mul64(r52, a41));
-  Lib_IntVector_Intrinsics_vec256
-  a26 =
-    Lib_IntVector_Intrinsics_vec256_add64(a25,
-      Lib_IntVector_Intrinsics_vec256_mul64(r53, a41));
-  Lib_IntVector_Intrinsics_vec256
-  a36 =
-    Lib_IntVector_Intrinsics_vec256_add64(a35,
-      Lib_IntVector_Intrinsics_vec256_mul64(r54, a41));
-  Lib_IntVector_Intrinsics_vec256
-  a46 =
-    Lib_IntVector_Intrinsics_vec256_add64(a45,
-      Lib_IntVector_Intrinsics_vec256_mul64(r0, a41));
-  Lib_IntVector_Intrinsics_vec256 t0 = a06;
-  Lib_IntVector_Intrinsics_vec256 t1 = a16;
-  Lib_IntVector_Intrinsics_vec256 t2 = a26;
-  Lib_IntVector_Intrinsics_vec256 t3 = a36;
-  Lib_IntVector_Intrinsics_vec256 t4 = a46;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
-  Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
-  Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
-  Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
-  Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
-  Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
-  Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
-  Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
-  Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
-  Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
-  Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
-  Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
-  Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
-  Lib_IntVector_Intrinsics_vec256 o0 = x02;
-  Lib_IntVector_Intrinsics_vec256 o1 = x12;
-  Lib_IntVector_Intrinsics_vec256 o2 = x21;
-  Lib_IntVector_Intrinsics_vec256 o3 = x32;
-  Lib_IntVector_Intrinsics_vec256 o4 = x42;
-  acc[0U] = o0;
-  acc[1U] = o1;
-  acc[2U] = o2;
-  acc[3U] = o3;
-  acc[4U] = o4;
+  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f202, 5ULL);
+  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f21, 5ULL);
+  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f22, 5ULL);
+  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f23, 5ULL);
+  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f24, 5ULL);
 }
 
-void
-Hacl_Poly1305_256_poly1305_update(
-  Lib_IntVector_Intrinsics_vec256 *ctx,
-  uint32_t len,
-  uint8_t *text
-)
+static void poly1305_update(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t *text)
 {
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  uint32_t sz_block = (uint32_t)64U;
+  uint32_t sz_block = 64U;
   uint32_t len0 = len / sz_block * sz_block;
   uint8_t *t0 = text;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)64U;
+    uint32_t bs = 64U;
     uint8_t *text0 = t0;
-    Hacl_Impl_Poly1305_Field32xN_256_load_acc4(acc, text0);
+    Hacl_MAC_Poly1305_Simd256_load_acc4(acc, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t0 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
       Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(block);
+      Lib_IntVector_Intrinsics_vec256 hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + 32U);
       Lib_IntVector_Intrinsics_vec256
-      hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + (uint32_t)32U);
-      Lib_IntVector_Intrinsics_vec256
-      mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
+      mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
       Lib_IntVector_Intrinsics_vec256
       m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
       Lib_IntVector_Intrinsics_vec256
       m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi);
-      Lib_IntVector_Intrinsics_vec256
-      m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U);
-      Lib_IntVector_Intrinsics_vec256
-      m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U);
+      Lib_IntVector_Intrinsics_vec256 m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, 48U);
+      Lib_IntVector_Intrinsics_vec256 m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, 48U);
       Lib_IntVector_Intrinsics_vec256
       m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t010 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t30 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
-      Lib_IntVector_Intrinsics_vec256
-      t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)4U);
+      Lib_IntVector_Intrinsics_vec256 t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 4U);
       Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask260);
       Lib_IntVector_Intrinsics_vec256
-      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, (uint32_t)26U);
+      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, 26U);
       Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t10, mask260);
       Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)30U);
+      Lib_IntVector_Intrinsics_vec256 t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 30U);
       Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256 o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, 40U);
       Lib_IntVector_Intrinsics_vec256 o00 = o5;
       Lib_IntVector_Intrinsics_vec256 o11 = o10;
       Lib_IntVector_Intrinsics_vec256 o21 = o20;
@@ -1309,12 +1030,12 @@ Hacl_Poly1305_256_poly1305_update(
       e[2U] = o21;
       e[3U] = o31;
       e[4U] = o41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
       Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec256 *rn = pre + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec256 *rn5 = pre + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec256 *rn = pre + 10U;
+      Lib_IntVector_Intrinsics_vec256 *rn5 = pre + 15U;
       Lib_IntVector_Intrinsics_vec256 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec256 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec256 r2 = rn[2U];
@@ -1419,37 +1140,28 @@ Hacl_Poly1305_256_poly1305_update(
       Lib_IntVector_Intrinsics_vec256 t2 = a24;
       Lib_IntVector_Intrinsics_vec256 t3 = a34;
       Lib_IntVector_Intrinsics_vec256 t4 = a44;
-      Lib_IntVector_Intrinsics_vec256
-      mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec256
-      z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec256
-      z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
       Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec256
-      z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec256
-      z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec256 o01 = x02;
@@ -1483,45 +1195,41 @@ Hacl_Poly1305_256_poly1305_update(
       acc[3U] = o3;
       acc[4U] = o4;
     }
-    Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(acc, pre);
+    Hacl_MAC_Poly1305_Simd256_fmul_r4_normalize(acc, pre);
   }
   uint32_t len1 = len - len0;
   uint8_t *t1 = text + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t1 + i * (uint32_t)16U;
+    uint8_t *block = t1 + i * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -1532,12 +1240,12 @@ Hacl_Poly1305_256_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
     Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
     Lib_IntVector_Intrinsics_vec256 *r = pre;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1652,37 +1360,28 @@ Hacl_Poly1305_256_poly1305_update(
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1696,41 +1395,37 @@ Hacl_Poly1305_256_poly1305_update(
     acc[3U] = o3;
     acc[4U] = o4;
   }
-  if (rem > (uint32_t)0U)
+  if (rem > 0U)
   {
-    uint8_t *last = t1 + nb * (uint32_t)16U;
+    uint8_t *last = t1 + nb * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -1741,12 +1436,12 @@ Hacl_Poly1305_256_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem * 8U % 26U;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
-    Lib_IntVector_Intrinsics_vec256 fi = e[rem * (uint32_t)8U / (uint32_t)26U];
-    e[rem * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec256 fi = e[rem * 8U / 26U];
+    e[rem * 8U / 26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
     Lib_IntVector_Intrinsics_vec256 *r = pre;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1861,37 +1556,28 @@ Hacl_Poly1305_256_poly1305_update(
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1909,14 +1595,14 @@ Hacl_Poly1305_256_poly1305_update(
 }
 
 void
-Hacl_Poly1305_256_poly1305_finish(
+Hacl_MAC_Poly1305_Simd256_poly1305_finish(
   uint8_t *tag,
   uint8_t *key,
   Lib_IntVector_Intrinsics_vec256 *ctx
 )
 {
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  uint8_t *ks = key + (uint32_t)16U;
+  uint8_t *ks = key + 16U;
   Lib_IntVector_Intrinsics_vec256 f0 = acc[0U];
   Lib_IntVector_Intrinsics_vec256 f13 = acc[1U];
   Lib_IntVector_Intrinsics_vec256 f23 = acc[2U];
@@ -1927,41 +1613,36 @@ Hacl_Poly1305_256_poly1305_finish(
   Lib_IntVector_Intrinsics_vec256
   tmp00 =
     Lib_IntVector_Intrinsics_vec256_and(l0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c00 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c00 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec256 l1 = Lib_IntVector_Intrinsics_vec256_add64(f13, c00);
   Lib_IntVector_Intrinsics_vec256
   tmp10 =
     Lib_IntVector_Intrinsics_vec256_and(l1,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c10 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c10 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec256 l2 = Lib_IntVector_Intrinsics_vec256_add64(f23, c10);
   Lib_IntVector_Intrinsics_vec256
   tmp20 =
     Lib_IntVector_Intrinsics_vec256_and(l2,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c20 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c20 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec256 l3 = Lib_IntVector_Intrinsics_vec256_add64(f33, c20);
   Lib_IntVector_Intrinsics_vec256
   tmp30 =
     Lib_IntVector_Intrinsics_vec256_and(l3,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c30 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c30 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec256 l4 = Lib_IntVector_Intrinsics_vec256_add64(f40, c30);
   Lib_IntVector_Intrinsics_vec256
   tmp40 =
     Lib_IntVector_Intrinsics_vec256_and(l4,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c40 = Lib_IntVector_Intrinsics_vec256_shift_right64(l4, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c40 = Lib_IntVector_Intrinsics_vec256_shift_right64(l4, 26U);
   Lib_IntVector_Intrinsics_vec256
   f010 =
     Lib_IntVector_Intrinsics_vec256_add64(tmp00,
-      Lib_IntVector_Intrinsics_vec256_smul64(c40, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec256_smul64(c40, 5ULL));
   Lib_IntVector_Intrinsics_vec256 f110 = tmp10;
   Lib_IntVector_Intrinsics_vec256 f210 = tmp20;
   Lib_IntVector_Intrinsics_vec256 f310 = tmp30;
@@ -1971,49 +1652,42 @@ Hacl_Poly1305_256_poly1305_finish(
   Lib_IntVector_Intrinsics_vec256
   tmp0 =
     Lib_IntVector_Intrinsics_vec256_and(l,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec256 l5 = Lib_IntVector_Intrinsics_vec256_add64(f110, c0);
   Lib_IntVector_Intrinsics_vec256
   tmp1 =
     Lib_IntVector_Intrinsics_vec256_and(l5,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l5, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l5, 26U);
   Lib_IntVector_Intrinsics_vec256 l6 = Lib_IntVector_Intrinsics_vec256_add64(f210, c1);
   Lib_IntVector_Intrinsics_vec256
   tmp2 =
     Lib_IntVector_Intrinsics_vec256_and(l6,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l6, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l6, 26U);
   Lib_IntVector_Intrinsics_vec256 l7 = Lib_IntVector_Intrinsics_vec256_add64(f310, c2);
   Lib_IntVector_Intrinsics_vec256
   tmp3 =
     Lib_IntVector_Intrinsics_vec256_and(l7,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l7, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l7, 26U);
   Lib_IntVector_Intrinsics_vec256 l8 = Lib_IntVector_Intrinsics_vec256_add64(f410, c3);
   Lib_IntVector_Intrinsics_vec256
   tmp4 =
     Lib_IntVector_Intrinsics_vec256_and(l8,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l8, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l8, 26U);
   Lib_IntVector_Intrinsics_vec256
   f02 =
     Lib_IntVector_Intrinsics_vec256_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec256_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec256_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec256 f12 = tmp1;
   Lib_IntVector_Intrinsics_vec256 f22 = tmp2;
   Lib_IntVector_Intrinsics_vec256 f32 = tmp3;
   Lib_IntVector_Intrinsics_vec256 f42 = tmp4;
-  Lib_IntVector_Intrinsics_vec256
-  mh = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  ml = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffffbU);
+  Lib_IntVector_Intrinsics_vec256 mh = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 ml = Lib_IntVector_Intrinsics_vec256_load64(0x3fffffbULL);
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_eq64(f42, mh);
   Lib_IntVector_Intrinsics_vec256
   mask1 =
@@ -2053,36 +1727,334 @@ Hacl_Poly1305_256_poly1305_finish(
   Lib_IntVector_Intrinsics_vec256 f2 = acc[2U];
   Lib_IntVector_Intrinsics_vec256 f3 = acc[3U];
   Lib_IntVector_Intrinsics_vec256 f4 = acc[4U];
-  uint64_t f01 = Lib_IntVector_Intrinsics_vec256_extract64(f00, (uint32_t)0U);
-  uint64_t f112 = Lib_IntVector_Intrinsics_vec256_extract64(f1, (uint32_t)0U);
-  uint64_t f212 = Lib_IntVector_Intrinsics_vec256_extract64(f2, (uint32_t)0U);
-  uint64_t f312 = Lib_IntVector_Intrinsics_vec256_extract64(f3, (uint32_t)0U);
-  uint64_t f41 = Lib_IntVector_Intrinsics_vec256_extract64(f4, (uint32_t)0U);
-  uint64_t lo = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U;
-  uint64_t hi = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U;
+  uint64_t f01 = Lib_IntVector_Intrinsics_vec256_extract64(f00, 0U);
+  uint64_t f112 = Lib_IntVector_Intrinsics_vec256_extract64(f1, 0U);
+  uint64_t f212 = Lib_IntVector_Intrinsics_vec256_extract64(f2, 0U);
+  uint64_t f312 = Lib_IntVector_Intrinsics_vec256_extract64(f3, 0U);
+  uint64_t f41 = Lib_IntVector_Intrinsics_vec256_extract64(f4, 0U);
+  uint64_t lo = (f01 | f112 << 26U) | f212 << 52U;
+  uint64_t hi = (f212 >> 12U | f312 << 14U) | f41 << 40U;
   uint64_t f10 = lo;
   uint64_t f11 = hi;
   uint64_t u0 = load64_le(ks);
   uint64_t lo0 = u0;
-  uint64_t u = load64_le(ks + (uint32_t)8U);
+  uint64_t u = load64_le(ks + 8U);
   uint64_t hi0 = u;
   uint64_t f20 = lo0;
   uint64_t f21 = hi0;
   uint64_t r0 = f10 + f20;
   uint64_t r1 = f11 + f21;
-  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U;
+  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> 63U;
   uint64_t r11 = r1 + c;
   uint64_t f30 = r0;
   uint64_t f31 = r11;
   store64_le(tag, f30);
-  store64_le(tag + (uint32_t)8U, f31);
+  store64_le(tag + 8U, f31);
+}
+
+Hacl_MAC_Poly1305_Simd256_state_t *Hacl_MAC_Poly1305_Simd256_malloc(uint8_t *key)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  Lib_IntVector_Intrinsics_vec256
+  *r1 =
+    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 25U);
+  memset(r1, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Lib_IntVector_Intrinsics_vec256 *block_state = r1;
+  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  memcpy(k_, key, 32U * sizeof (uint8_t));
+  uint8_t *k_0 = k_;
+  Hacl_MAC_Poly1305_Simd256_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_0 };
+  Hacl_MAC_Poly1305_Simd256_state_t
+  *p =
+    (Hacl_MAC_Poly1305_Simd256_state_t *)KRML_HOST_MALLOC(sizeof (
+        Hacl_MAC_Poly1305_Simd256_state_t
+      ));
+  p[0U] = s;
+  Hacl_MAC_Poly1305_Simd256_poly1305_init(block_state, key);
+  return p;
+}
+
+void Hacl_MAC_Poly1305_Simd256_reset(Hacl_MAC_Poly1305_Simd256_state_t *state, uint8_t *key)
+{
+  Hacl_MAC_Poly1305_Simd256_state_t scrut = *state;
+  uint8_t *k_ = scrut.p_key;
+  uint8_t *buf = scrut.buf;
+  Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
+  Hacl_MAC_Poly1305_Simd256_poly1305_init(block_state, key);
+  memcpy(k_, key, 32U * sizeof (uint8_t));
+  uint8_t *k_1 = k_;
+  Hacl_MAC_Poly1305_Simd256_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 };
+  state[0U] = tmp;
 }
 
-void Hacl_Poly1305_256_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key)
+/**
+Returns 0 (success) or 1 if the maximum total input length would be exceeded.
+*/
+Hacl_Streaming_Types_error_code
+Hacl_MAC_Poly1305_Simd256_update(
+  Hacl_MAC_Poly1305_Simd256_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
+)
+{
+  Hacl_MAC_Poly1305_Simd256_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
+  {
+    sz = 64U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)64U);
+  }
+  if (chunk_len <= 64U - sz)
+  {
+    Hacl_MAC_Poly1305_Simd256_state_t s1 = *state;
+    Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd256_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2,
+          .p_key = k_1
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_MAC_Poly1305_Simd256_state_t s1 = *state;
+    Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    if (!(sz1 == 0U))
+    {
+      poly1305_update(block_state1, 64U, buf);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 64U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    poly1305_update(block_state1, data1_len, data1);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd256_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len,
+          .p_key = k_1
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 64U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_MAC_Poly1305_Simd256_state_t s1 = *state;
+    Lib_IntVector_Intrinsics_vec256 *block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 64U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd256_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2,
+          .p_key = k_1
+        }
+      );
+    Hacl_MAC_Poly1305_Simd256_state_t s10 = *state;
+    Lib_IntVector_Intrinsics_vec256 *block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint8_t *k_10 = s10.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    if (!(sz1 == 0U))
+    {
+      poly1305_update(block_state1, 64U, buf);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 64U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    poly1305_update(block_state1, data1_len, data1);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd256_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff),
+          .p_key = k_10
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
+
+void
+Hacl_MAC_Poly1305_Simd256_digest(Hacl_MAC_Poly1305_Simd256_state_t *state, uint8_t *output)
+{
+  Hacl_MAC_Poly1305_Simd256_state_t scrut = *state;
+  Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint8_t *k_ = scrut.p_key;
+  uint32_t r;
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
+  {
+    r = 64U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)64U);
+  }
+  uint8_t *buf_1 = buf_;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 r1[25U] KRML_POST_ALIGN(32) = { 0U };
+  Lib_IntVector_Intrinsics_vec256 *tmp_block_state = r1;
+  memcpy(tmp_block_state, block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  uint32_t ite0;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite0 = 16U;
+  }
+  else
+  {
+    ite0 = r % 16U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite0;
+  uint8_t *buf_multi = buf_1;
+  uint32_t ite;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite = 16U;
+  }
+  else
+  {
+    ite = r % 16U;
+  }
+  poly1305_update(tmp_block_state, r - ite, buf_multi);
+  uint32_t ite1;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite1 = 16U;
+  }
+  else
+  {
+    ite1 = r % 16U;
+  }
+  poly1305_update(tmp_block_state, ite1, buf_last);
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 tmp[25U] KRML_POST_ALIGN(32) = { 0U };
+  memcpy(tmp, tmp_block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Hacl_MAC_Poly1305_Simd256_poly1305_finish(output, k_, tmp);
+}
+
+void Hacl_MAC_Poly1305_Simd256_free(Hacl_MAC_Poly1305_Simd256_state_t *state)
+{
+  Hacl_MAC_Poly1305_Simd256_state_t scrut = *state;
+  uint8_t *k_ = scrut.p_key;
+  uint8_t *buf = scrut.buf;
+  Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
+  KRML_HOST_FREE(k_);
+  KRML_ALIGNED_FREE(block_state);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
+
+void
+Hacl_MAC_Poly1305_Simd256_mac(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key
+)
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[25U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Poly1305_256_poly1305_init(ctx, key);
-  Hacl_Poly1305_256_poly1305_update(ctx, len, text);
-  Hacl_Poly1305_256_poly1305_finish(tag, key, ctx);
+  Hacl_MAC_Poly1305_Simd256_poly1305_init(ctx, key);
+  poly1305_update(ctx, input_len, input);
+  Hacl_MAC_Poly1305_Simd256_poly1305_finish(output, key, ctx);
 }
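
The hunks above replace the one-shot Hacl_Poly1305_256_* entry points with a streaming Hacl_MAC_Poly1305_Simd256 API (malloc / update / digest / free) alongside the one-shot mac. The following is a minimal usage sketch, not part of the patch: it assumes the matching public header is named Hacl_MAC_Poly1305_Simd256.h and that it pulls in the Hacl_Streaming_Types error codes; the demo function name and the two-chunk split are illustrative only. Per the code above, the key is 32 bytes and the tag 16 bytes.

#include <stdint.h>
#include <string.h>
#include "Hacl_MAC_Poly1305_Simd256.h"  /* assumed header name for the API above */

/* Sketch: tag `msg` with the streaming API in two chunks, then cross-check the
   result against the one-shot entry point. Returns 0 when both paths agree and
   no error occurred. */
static int poly1305_simd256_demo(uint8_t *key /* 32 bytes */, uint8_t *msg, uint32_t msg_len)
{
  uint8_t tag_stream[16U] = { 0U };
  uint8_t tag_oneshot[16U] = { 0U };
  uint32_t half = msg_len / 2U;

  /* Streaming path: allocate state from the key, feed two chunks, finalize, free. */
  Hacl_MAC_Poly1305_Simd256_state_t *st = Hacl_MAC_Poly1305_Simd256_malloc(key);
  Hacl_Streaming_Types_error_code e0 = Hacl_MAC_Poly1305_Simd256_update(st, msg, half);
  Hacl_Streaming_Types_error_code
  e1 = Hacl_MAC_Poly1305_Simd256_update(st, msg + half, msg_len - half);
  Hacl_MAC_Poly1305_Simd256_digest(st, tag_stream);
  Hacl_MAC_Poly1305_Simd256_free(st);

  /* One-shot path; note the argument order is (output, input, input_len, key). */
  Hacl_MAC_Poly1305_Simd256_mac(tag_oneshot, msg, msg_len, key);

  if (e0 != Hacl_Streaming_Types_Success || e1 != Hacl_Streaming_Types_Success)
  {
    return -1;
  }
  /* memcmp is acceptable here because both tags are computed locally; verifying a
     tag received from a peer should use a constant-time comparison instead. */
  return memcmp(tag_stream, tag_oneshot, 16U) == 0 ? 0 : -1;
}
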
 
diff --git a/src/Hacl_NaCl.c b/src/Hacl_NaCl.c
index 37104040..a1bbd25c 100644
--- a/src/Hacl_NaCl.c
+++ b/src/Hacl_NaCl.c
@@ -30,9 +30,9 @@
 static void secretbox_init(uint8_t *xkeys, uint8_t *k, uint8_t *n)
 {
   uint8_t *subkey = xkeys;
-  uint8_t *aekey = xkeys + (uint32_t)32U;
+  uint8_t *aekey = xkeys + 32U;
   uint8_t *n0 = n;
-  uint8_t *n1 = n + (uint32_t)16U;
+  uint8_t *n1 = n + 16U;
   Hacl_Salsa20_hsalsa20(subkey, k, n0);
   Hacl_Salsa20_salsa20_key_block0(aekey, subkey, n1);
 }
@@ -42,35 +42,35 @@ secretbox_detached(uint32_t mlen, uint8_t *c, uint8_t *tag, uint8_t *k, uint8_t
 {
   uint8_t xkeys[96U] = { 0U };
   secretbox_init(xkeys, k, n);
-  uint8_t *mkey = xkeys + (uint32_t)32U;
-  uint8_t *n1 = n + (uint32_t)16U;
+  uint8_t *mkey = xkeys + 32U;
+  uint8_t *n1 = n + 16U;
   uint8_t *subkey = xkeys;
-  uint8_t *ekey0 = xkeys + (uint32_t)64U;
+  uint8_t *ekey0 = xkeys + 64U;
   uint32_t mlen0;
-  if (mlen <= (uint32_t)32U)
+  if (mlen <= 32U)
   {
     mlen0 = mlen;
   }
   else
   {
-    mlen0 = (uint32_t)32U;
+    mlen0 = 32U;
   }
   uint32_t mlen1 = mlen - mlen0;
   uint8_t *m0 = m;
   uint8_t *m1 = m + mlen0;
   uint8_t block0[32U] = { 0U };
   memcpy(block0, m0, mlen0 * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = block0;
-    uint8_t x = block0[i] ^ ekey0[i];
+    uint8_t x = (uint32_t)block0[i] ^ (uint32_t)ekey0[i];
     os[i] = x;
   }
   uint8_t *c0 = c;
   uint8_t *c1 = c + mlen0;
   memcpy(c0, block0, mlen0 * sizeof (uint8_t));
-  Hacl_Salsa20_salsa20_encrypt(mlen1, c1, m1, subkey, n1, (uint32_t)1U);
-  Hacl_Poly1305_32_poly1305_mac(tag, mlen, c, mkey);
+  Hacl_Salsa20_salsa20_encrypt(mlen1, c1, m1, subkey, n1, 1U);
+  Hacl_MAC_Poly1305_mac(tag, c, mlen, mkey);
 }
 
 static uint32_t
@@ -85,55 +85,55 @@ secretbox_open_detached(
 {
   uint8_t xkeys[96U] = { 0U };
   secretbox_init(xkeys, k, n);
-  uint8_t *mkey = xkeys + (uint32_t)32U;
+  uint8_t *mkey = xkeys + 32U;
   uint8_t tag_[16U] = { 0U };
-  Hacl_Poly1305_32_poly1305_mac(tag_, mlen, c, mkey);
-  uint8_t res = (uint8_t)255U;
+  Hacl_MAC_Poly1305_mac(tag_, c, mlen, mkey);
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t uu____0 = FStar_UInt8_eq_mask(tag[i], tag_[i]);
-    res = uu____0 & res;);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
     uint8_t *subkey = xkeys;
-    uint8_t *ekey0 = xkeys + (uint32_t)64U;
-    uint8_t *n1 = n + (uint32_t)16U;
+    uint8_t *ekey0 = xkeys + 64U;
+    uint8_t *n1 = n + 16U;
     uint32_t mlen0;
-    if (mlen <= (uint32_t)32U)
+    if (mlen <= 32U)
     {
       mlen0 = mlen;
     }
     else
     {
-      mlen0 = (uint32_t)32U;
+      mlen0 = 32U;
     }
     uint32_t mlen1 = mlen - mlen0;
     uint8_t *c0 = c;
     uint8_t *c1 = c + mlen0;
     uint8_t block0[32U] = { 0U };
     memcpy(block0, c0, mlen0 * sizeof (uint8_t));
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t *os = block0;
-      uint8_t x = block0[i] ^ ekey0[i];
+      uint8_t x = (uint32_t)block0[i] ^ (uint32_t)ekey0[i];
       os[i] = x;
     }
     uint8_t *m0 = m;
     uint8_t *m1 = m + mlen0;
     memcpy(m0, block0, mlen0 * sizeof (uint8_t));
-    Hacl_Salsa20_salsa20_decrypt(mlen1, m1, c1, subkey, n1, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Salsa20_salsa20_decrypt(mlen1, m1, c1, subkey, n1, 1U);
+    return 0U;
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static void secretbox_easy(uint32_t mlen, uint8_t *c, uint8_t *k, uint8_t *n, uint8_t *m)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   secretbox_detached(mlen, cip, tag, k, n, m);
 }
 
@@ -141,7 +141,7 @@ static uint32_t
 secretbox_open_easy(uint32_t mlen, uint8_t *m, uint8_t *k, uint8_t *n, uint8_t *c)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   return secretbox_open_detached(mlen, m, k, n, cip, tag);
 }
 
@@ -152,9 +152,9 @@ static inline uint32_t box_beforenm(uint8_t *k, uint8_t *pk, uint8_t *sk)
   if (r)
   {
     Hacl_Salsa20_hsalsa20(k, k, n0);
-    return (uint32_t)0U;
+    return 0U;
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static inline uint32_t
@@ -168,7 +168,7 @@ box_detached_afternm(
 )
 {
   secretbox_detached(mlen, c, tag, k, n, m);
-  return (uint32_t)0U;
+  return 0U;
 }
 
 static inline uint32_t
@@ -184,11 +184,11 @@ box_detached(
 {
   uint8_t k[32U] = { 0U };
   uint32_t r = box_beforenm(k, pk, sk);
-  if (r == (uint32_t)0U)
+  if (r == 0U)
   {
     return box_detached_afternm(mlen, c, tag, k, n, m);
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static inline uint32_t
@@ -217,18 +217,18 @@ box_open_detached(
 {
   uint8_t k[32U] = { 0U };
   uint32_t r = box_beforenm(k, pk, sk);
-  if (r == (uint32_t)0U)
+  if (r == 0U)
   {
     return box_open_detached_afternm(mlen, m, k, n, c, tag);
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static inline uint32_t
 box_easy_afternm(uint32_t mlen, uint8_t *c, uint8_t *k, uint8_t *n, uint8_t *m)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   uint32_t res = box_detached_afternm(mlen, cip, tag, k, n, m);
   return res;
 }
@@ -237,7 +237,7 @@ static inline uint32_t
 box_easy(uint32_t mlen, uint8_t *c, uint8_t *sk, uint8_t *pk, uint8_t *n, uint8_t *m)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   uint32_t res = box_detached(mlen, cip, tag, sk, pk, n, m);
   return res;
 }
@@ -246,7 +246,7 @@ static inline uint32_t
 box_open_easy_afternm(uint32_t mlen, uint8_t *m, uint8_t *k, uint8_t *n, uint8_t *c)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   return box_open_detached_afternm(mlen, m, k, n, cip, tag);
 }
 
@@ -254,7 +254,7 @@ static inline uint32_t
 box_open_easy(uint32_t mlen, uint8_t *m, uint8_t *pk, uint8_t *sk, uint8_t *n, uint8_t *c)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   return box_open_detached(mlen, m, pk, sk, n, cip, tag);
 }
 
@@ -281,7 +281,7 @@ Hacl_NaCl_crypto_secretbox_detached(
 )
 {
   secretbox_detached(mlen, c, tag, k, n, m);
-  return (uint32_t)0U;
+  return 0U;
 }
 
 /**
@@ -322,7 +322,7 @@ uint32_t
 Hacl_NaCl_crypto_secretbox_easy(uint8_t *c, uint8_t *m, uint32_t mlen, uint8_t *n, uint8_t *k)
 {
   secretbox_easy(mlen, c, k, n, m);
-  return (uint32_t)0U;
+  return 0U;
 }
 
 /**
@@ -343,7 +343,7 @@ Hacl_NaCl_crypto_secretbox_open_easy(
   uint8_t *k
 )
 {
-  return secretbox_open_easy(clen - (uint32_t)16U, m, k, n, c);
+  return secretbox_open_easy(clen - 16U, m, k, n, c);
 }
 
 /**
@@ -490,7 +490,7 @@ Hacl_NaCl_crypto_box_open_easy_afternm(
   uint8_t *k
 )
 {
-  return box_open_easy_afternm(clen - (uint32_t)16U, m, k, n, c);
+  return box_open_easy_afternm(clen - 16U, m, k, n, c);
 }
 
 /**
@@ -513,6 +513,6 @@ Hacl_NaCl_crypto_box_open_easy(
   uint8_t *sk
 )
 {
-  return box_open_easy(clen - (uint32_t)16U, m, pk, sk, n, c);
+  return box_open_easy(clen - 16U, m, pk, sk, n, c);
 }
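
For orientation only (not part of the patch), a minimal standalone sketch of the one-shot secretbox API whose internals the hunks above rewrite. The prototypes are assumed to match include/Hacl_NaCl.h as exercised above (tag prepended, 0U on success, 0xffffffffU on failure); the all-zero key and nonce are placeholder test values, not a recommendation.

/* Sketch: encrypt then decrypt one message with Hacl_NaCl's one-shot API. */
/* Assumes the Hacl_NaCl.h prototypes; key/nonce below are dummy values.   */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "Hacl_NaCl.h"

int main(void)
{
  uint8_t key[32U] = { 0U };    /* placeholder key; use a real RNG in practice */
  uint8_t nonce[24U] = { 0U };  /* placeholder nonce; must be unique per key   */
  uint8_t msg[12U] = "hello world";
  uint8_t cipher[28U];          /* 16-byte Poly1305 tag precedes the ciphertext */
  uint8_t plain[12U] = { 0U };
  /* Both calls return 0U on success and 0xffffffffU on failure. */
  uint32_t r0 = Hacl_NaCl_crypto_secretbox_easy(cipher, msg, 12U, nonce, key);
  uint32_t r1 = Hacl_NaCl_crypto_secretbox_open_easy(plain, cipher, 28U, nonce, key);
  if (r0 == 0U && r1 == 0U && memcmp(msg, plain, 12U) == 0)
  {
    printf("secretbox roundtrip ok\n");
    return 0;
  }
  return 1;
}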
 
diff --git a/src/Hacl_P256.c b/src/Hacl_P256.c
index 7e586e54..609fed81 100644
--- a/src/Hacl_P256.c
+++ b/src/Hacl_P256.c
@@ -33,11 +33,11 @@
 static inline uint64_t bn_is_zero_mask4(uint64_t *f)
 {
   uint64_t bn_zero[4U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(f[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
@@ -48,16 +48,16 @@ static inline uint64_t bn_is_zero_mask4(uint64_t *f)
 static inline bool bn_is_zero_vartime4(uint64_t *f)
 {
   uint64_t m = bn_is_zero_mask4(f);
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static inline uint64_t bn_is_eq_mask4(uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
@@ -67,16 +67,16 @@ static inline uint64_t bn_is_eq_mask4(uint64_t *a, uint64_t *b)
 static inline bool bn_is_eq_vartime4(uint64_t *a, uint64_t *b)
 {
   uint64_t m = bn_is_eq_mask4(a, b);
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static inline void bn_cmovznz4(uint64_t *res, uint64_t cin, uint64_t *x, uint64_t *y)
 {
-  uint64_t mask = ~FStar_UInt64_eq_mask(cin, (uint64_t)0U);
+  uint64_t mask = ~FStar_UInt64_eq_mask(cin, 0ULL);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t uu____0 = x[i];
     uint64_t x1 = uu____0 ^ (mask & (y[i] ^ uu____0));
@@ -85,52 +85,52 @@ static inline void bn_cmovznz4(uint64_t *res, uint64_t cin, uint64_t *x, uint64_
 
 static inline void bn_add_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t *y)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = x[4U * 0U];
+    uint64_t t20 = y[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = x[4U * 0U + 1U];
+    uint64_t t21 = y[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = x[4U * 0U + 2U];
+    uint64_t t22 = y[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = x[4U * 0U + 3U];
+    uint64_t t2 = y[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x1;);
@@ -138,23 +138,23 @@ static inline void bn_add_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t
 
 static inline uint64_t bn_sub4(uint64_t *res, uint64_t *x, uint64_t *y)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = x[4U * 0U];
+    uint64_t t20 = y[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = x[4U * 0U + 1U];
+    uint64_t t21 = y[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = x[4U * 0U + 2U];
+    uint64_t t22 = y[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = x[4U * 0U + 3U];
+    uint64_t t2 = y[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c0 = c;
@@ -163,53 +163,53 @@ static inline uint64_t bn_sub4(uint64_t *res, uint64_t *x, uint64_t *y)
 
 static inline void bn_sub_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t *y)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = x[4U * 0U];
+    uint64_t t20 = y[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = x[4U * 0U + 1U];
+    uint64_t t21 = y[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = x[4U * 0U + 2U];
+    uint64_t t22 = y[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = x[4U * 0U + 3U];
+    uint64_t t2 = y[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x1;);
@@ -217,59 +217,59 @@ static inline void bn_sub_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t
 
 static inline void bn_mul4(uint64_t *res, uint64_t *x, uint64_t *y)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = y[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = x[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = x[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = x[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = x[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = x[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
     uint64_t r = c;
-    res[(uint32_t)4U + i0] = r;);
+    res[4U + i0] = r;);
 }
 
 static inline void bn_sqr4(uint64_t *res, uint64_t *x)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = x;
     uint64_t a_j = x[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -277,41 +277,37 @@ static inline void bn_sqr4(uint64_t *res, uint64_t *x)
     }
     uint64_t r = c;
     res[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(x[i], x[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void bn_to_bytes_be4(uint8_t *res, uint64_t *f)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(res + i * (uint32_t)8U, f[(uint32_t)4U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(res + i * 8U, f[4U - i - 1U]););
 }
 
 static inline void bn_from_bytes_be4(uint64_t *res, uint8_t *b)
 {
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
 }
@@ -319,79 +315,79 @@ static inline void bn_from_bytes_be4(uint64_t *res, uint8_t *b)
 static inline void bn2_to_bytes_be4(uint8_t *res, uint64_t *x, uint64_t *y)
 {
   bn_to_bytes_be4(res, x);
-  bn_to_bytes_be4(res + (uint32_t)32U, y);
+  bn_to_bytes_be4(res + 32U, y);
 }
 
 static inline void make_prime(uint64_t *n)
 {
-  n[0U] = (uint64_t)0xffffffffffffffffU;
-  n[1U] = (uint64_t)0xffffffffU;
-  n[2U] = (uint64_t)0x0U;
-  n[3U] = (uint64_t)0xffffffff00000001U;
+  n[0U] = 0xffffffffffffffffULL;
+  n[1U] = 0xffffffffULL;
+  n[2U] = 0x0ULL;
+  n[3U] = 0xffffffff00000001ULL;
 }
 
 static inline void make_order(uint64_t *n)
 {
-  n[0U] = (uint64_t)0xf3b9cac2fc632551U;
-  n[1U] = (uint64_t)0xbce6faada7179e84U;
-  n[2U] = (uint64_t)0xffffffffffffffffU;
-  n[3U] = (uint64_t)0xffffffff00000000U;
+  n[0U] = 0xf3b9cac2fc632551ULL;
+  n[1U] = 0xbce6faada7179e84ULL;
+  n[2U] = 0xffffffffffffffffULL;
+  n[3U] = 0xffffffff00000000ULL;
 }
 
 static inline void make_a_coeff(uint64_t *a)
 {
-  a[0U] = (uint64_t)0xfffffffffffffffcU;
-  a[1U] = (uint64_t)0x3ffffffffU;
-  a[2U] = (uint64_t)0x0U;
-  a[3U] = (uint64_t)0xfffffffc00000004U;
+  a[0U] = 0xfffffffffffffffcULL;
+  a[1U] = 0x3ffffffffULL;
+  a[2U] = 0x0ULL;
+  a[3U] = 0xfffffffc00000004ULL;
 }
 
 static inline void make_b_coeff(uint64_t *b)
 {
-  b[0U] = (uint64_t)0xd89cdf6229c4bddfU;
-  b[1U] = (uint64_t)0xacf005cd78843090U;
-  b[2U] = (uint64_t)0xe5a220abf7212ed6U;
-  b[3U] = (uint64_t)0xdc30061d04874834U;
+  b[0U] = 0xd89cdf6229c4bddfULL;
+  b[1U] = 0xacf005cd78843090ULL;
+  b[2U] = 0xe5a220abf7212ed6ULL;
+  b[3U] = 0xdc30061d04874834ULL;
 }
 
 static inline void make_g_x(uint64_t *n)
 {
-  n[0U] = (uint64_t)0x79e730d418a9143cU;
-  n[1U] = (uint64_t)0x75ba95fc5fedb601U;
-  n[2U] = (uint64_t)0x79fb732b77622510U;
-  n[3U] = (uint64_t)0x18905f76a53755c6U;
+  n[0U] = 0x79e730d418a9143cULL;
+  n[1U] = 0x75ba95fc5fedb601ULL;
+  n[2U] = 0x79fb732b77622510ULL;
+  n[3U] = 0x18905f76a53755c6ULL;
 }
 
 static inline void make_g_y(uint64_t *n)
 {
-  n[0U] = (uint64_t)0xddf25357ce95560aU;
-  n[1U] = (uint64_t)0x8b4ab8e4ba19e45cU;
-  n[2U] = (uint64_t)0xd2e88688dd21f325U;
-  n[3U] = (uint64_t)0x8571ff1825885d85U;
+  n[0U] = 0xddf25357ce95560aULL;
+  n[1U] = 0x8b4ab8e4ba19e45cULL;
+  n[2U] = 0xd2e88688dd21f325ULL;
+  n[3U] = 0x8571ff1825885d85ULL;
 }
 
 static inline void make_fmont_R2(uint64_t *n)
 {
-  n[0U] = (uint64_t)0x3U;
-  n[1U] = (uint64_t)0xfffffffbffffffffU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0x4fffffffdU;
+  n[0U] = 0x3ULL;
+  n[1U] = 0xfffffffbffffffffULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0x4fffffffdULL;
 }
 
 static inline void make_fzero(uint64_t *n)
 {
-  n[0U] = (uint64_t)0U;
-  n[1U] = (uint64_t)0U;
-  n[2U] = (uint64_t)0U;
-  n[3U] = (uint64_t)0U;
+  n[0U] = 0ULL;
+  n[1U] = 0ULL;
+  n[2U] = 0ULL;
+  n[3U] = 0ULL;
 }
 
 static inline void make_fone(uint64_t *n)
 {
-  n[0U] = (uint64_t)0x1U;
-  n[1U] = (uint64_t)0xffffffff00000000U;
-  n[2U] = (uint64_t)0xffffffffffffffffU;
-  n[3U] = (uint64_t)0xfffffffeU;
+  n[0U] = 0x1ULL;
+  n[1U] = 0xffffffff00000000ULL;
+  n[2U] = 0xffffffffffffffffULL;
+  n[3U] = 0xfffffffeULL;
 }
 
 static inline uint64_t bn_is_lt_prime_mask4(uint64_t *f)
@@ -399,7 +395,7 @@ static inline uint64_t bn_is_lt_prime_mask4(uint64_t *f)
   uint64_t tmp[4U] = { 0U };
   make_prime(tmp);
   uint64_t c = bn_sub4(tmp, f, tmp);
-  return (uint64_t)0U - c;
+  return 0ULL - c;
 }
 
 static inline uint64_t feq_mask(uint64_t *a, uint64_t *b)
@@ -435,61 +431,61 @@ static inline void mont_reduction(uint64_t *res, uint64_t *x)
 {
   uint64_t n[4U] = { 0U };
   make_prime(n);
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t qj = (uint64_t)1U * x[i0];
+    0U,
+    4U,
+    1U,
+    uint64_t qj = 1ULL * x[i0];
     uint64_t *res_j0 = x + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
     }
     uint64_t r = c;
     uint64_t c1 = r;
-    uint64_t *resb = x + (uint32_t)4U + i0;
-    uint64_t res_j = x[(uint32_t)4U + i0];
+    uint64_t *resb = x + 4U + i0;
+    uint64_t res_j = x[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););
-  memcpy(res, x + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, x + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x1;);
@@ -512,7 +508,7 @@ static inline void fsqr0(uint64_t *res, uint64_t *x)
 static inline void from_mont(uint64_t *res, uint64_t *a)
 {
   uint64_t tmp[8U] = { 0U };
-  memcpy(tmp, a, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, a, 4U * sizeof (uint64_t));
   mont_reduction(res, tmp);
 }
 
@@ -540,105 +536,105 @@ static inline void finv(uint64_t *res, uint64_t *a)
 {
   uint64_t tmp[16U] = { 0U };
   uint64_t *x30 = tmp;
-  uint64_t *x2 = tmp + (uint32_t)4U;
-  uint64_t *tmp1 = tmp + (uint32_t)8U;
-  uint64_t *tmp2 = tmp + (uint32_t)12U;
-  memcpy(x2, a, (uint32_t)4U * sizeof (uint64_t));
+  uint64_t *x2 = tmp + 4U;
+  uint64_t *tmp1 = tmp + 8U;
+  uint64_t *tmp2 = tmp + 12U;
+  memcpy(x2, a, 4U * sizeof (uint64_t));
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, a);
-  memcpy(x30, x2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x30, x2, 4U * sizeof (uint64_t));
   {
     fsqr0(x30, x30);
   }
   fmul0(x30, x30, a);
-  memcpy(tmp1, x30, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, x30, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, x30);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, fsqr0(tmp2, tmp2););
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, fsqr0(tmp2, tmp2););
   fmul0(tmp2, tmp2, tmp1);
-  memcpy(tmp1, tmp2, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, tmp2, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, x30);
-  memcpy(x30, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR15(i, (uint32_t)0U, (uint32_t)15U, (uint32_t)1U, fsqr0(x30, x30););
+  memcpy(x30, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR15(i, 0U, 15U, 1U, fsqr0(x30, x30););
   fmul0(x30, x30, tmp1);
-  memcpy(tmp1, x30, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, x30, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, x2);
-  memcpy(x2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  memcpy(x2, tmp1, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, a);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, tmp1);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, tmp1);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)30U; i++)
+  for (uint32_t i = 0U; i < 30U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, x30);
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(x2, x2););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(x2, x2););
   fmul0(tmp1, x2, a);
-  memcpy(res, tmp1, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, tmp1, 4U * sizeof (uint64_t));
 }
 
 static inline void fsqrt(uint64_t *res, uint64_t *a)
 {
   uint64_t tmp[8U] = { 0U };
   uint64_t *tmp1 = tmp;
-  uint64_t *tmp2 = tmp + (uint32_t)4U;
-  memcpy(tmp1, a, (uint32_t)4U * sizeof (uint64_t));
+  uint64_t *tmp2 = tmp + 4U;
+  memcpy(tmp1, a, 4U * sizeof (uint64_t));
   {
     fsqr0(tmp1, tmp1);
   }
   fmul0(tmp1, tmp1, a);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(tmp2, tmp2););
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(tmp2, tmp2););
   fmul0(tmp2, tmp2, tmp1);
-  memcpy(tmp1, tmp2, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, tmp2, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, tmp2);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, fsqr0(tmp2, tmp2););
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, fsqr0(tmp2, tmp2););
   fmul0(tmp2, tmp2, tmp1);
-  memcpy(tmp1, tmp2, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR16(i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, tmp2, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, tmp2);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     fsqr0(tmp2, tmp2);
   }
   fmul0(tmp2, tmp2, a);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)96U; i++)
+  for (uint32_t i = 0U; i < 96U; i++)
   {
     fsqr0(tmp2, tmp2);
   }
   fmul0(tmp2, tmp2, a);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)94U; i++)
+  for (uint32_t i = 0U; i < 94U; i++)
   {
     fsqr0(tmp2, tmp2);
   }
-  memcpy(res, tmp2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, tmp2, 4U * sizeof (uint64_t));
 }
 
 static inline void make_base_point(uint64_t *p)
 {
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)4U;
-  uint64_t *z = p + (uint32_t)8U;
+  uint64_t *y = p + 4U;
+  uint64_t *z = p + 8U;
   make_g_x(x);
   make_g_y(y);
   make_fone(z);
@@ -647,8 +643,8 @@ static inline void make_base_point(uint64_t *p)
 static inline void make_point_at_inf(uint64_t *p)
 {
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)4U;
-  uint64_t *z = p + (uint32_t)8U;
+  uint64_t *y = p + 4U;
+  uint64_t *z = p + 8U;
   make_fzero(x);
   make_fone(y);
   make_fzero(z);
@@ -656,7 +652,7 @@ static inline void make_point_at_inf(uint64_t *p)
 
 static inline bool is_point_at_inf_vartime(uint64_t *p)
 {
-  uint64_t *pz = p + (uint32_t)8U;
+  uint64_t *pz = p + 8U;
   return bn_is_zero_vartime4(pz);
 }
 
@@ -664,10 +660,10 @@ static inline void to_aff_point(uint64_t *res, uint64_t *p)
 {
   uint64_t zinv[4U] = { 0U };
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
-  uint64_t *pz = p + (uint32_t)8U;
+  uint64_t *py = p + 4U;
+  uint64_t *pz = p + 8U;
   uint64_t *x = res;
-  uint64_t *y = res + (uint32_t)4U;
+  uint64_t *y = res + 4U;
   finv(zinv, pz);
   fmul0(x, px, zinv);
   fmul0(y, py, zinv);
@@ -679,7 +675,7 @@ static inline void to_aff_point_x(uint64_t *res, uint64_t *p)
 {
   uint64_t zinv[4U] = { 0U };
   uint64_t *px = p;
-  uint64_t *pz = p + (uint32_t)8U;
+  uint64_t *pz = p + 8U;
   finv(zinv, pz);
   fmul0(res, px, zinv);
   from_mont(res, res);
@@ -688,10 +684,10 @@ static inline void to_aff_point_x(uint64_t *res, uint64_t *p)
 static inline void to_proj_point(uint64_t *res, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   uint64_t *rx = res;
-  uint64_t *ry = res + (uint32_t)4U;
-  uint64_t *rz = res + (uint32_t)8U;
+  uint64_t *ry = res + 4U;
+  uint64_t *rz = res + 8U;
   to_mont(rx, px);
   to_mont(ry, py);
   make_fone(rz);
@@ -703,7 +699,7 @@ static inline bool is_on_curve_vartime(uint64_t *p)
   uint64_t tx[4U] = { 0U };
   uint64_t ty[4U] = { 0U };
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   to_mont(tx, px);
   to_mont(ty, py);
   uint64_t tmp[4U] = { 0U };
@@ -715,14 +711,14 @@ static inline bool is_on_curve_vartime(uint64_t *p)
   fadd0(rp, tmp, rp);
   fsqr0(ty, ty);
   uint64_t r = feq_mask(ty, rp);
-  bool r0 = r == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool r0 = r == 0xFFFFFFFFFFFFFFFFULL;
   return r0;
 }
 
 static inline void aff_point_store(uint8_t *res, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   bn2_to_bytes_be4(res, px, py);
 }
 
@@ -736,17 +732,17 @@ static inline void point_store(uint8_t *res, uint64_t *p)
 static inline bool aff_point_load_vartime(uint64_t *p, uint8_t *b)
 {
   uint8_t *p_x = b;
-  uint8_t *p_y = b + (uint32_t)32U;
+  uint8_t *p_y = b + 32U;
   uint64_t *bn_p_x = p;
-  uint64_t *bn_p_y = p + (uint32_t)4U;
+  uint64_t *bn_p_y = p + 4U;
   bn_from_bytes_be4(bn_p_x, p_x);
   bn_from_bytes_be4(bn_p_y, p_y);
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   uint64_t lessX = bn_is_lt_prime_mask4(px);
   uint64_t lessY = bn_is_lt_prime_mask4(py);
   uint64_t res = lessX & lessY;
-  bool is_xy_valid = res == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool is_xy_valid = res == 0xFFFFFFFFFFFFFFFFULL;
   if (!is_xy_valid)
   {
     return false;
@@ -769,15 +765,15 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
 {
   uint8_t s0 = s[0U];
   uint8_t s01 = s0;
-  if (!(s01 == (uint8_t)0x02U || s01 == (uint8_t)0x03U))
+  if (!(s01 == 0x02U || s01 == 0x03U))
   {
     return false;
   }
-  uint8_t *xb = s + (uint32_t)1U;
+  uint8_t *xb = s + 1U;
   bn_from_bytes_be4(x, xb);
   uint64_t is_x_valid = bn_is_lt_prime_mask4(x);
-  bool is_x_valid1 = is_x_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  bool is_y_odd = s01 == (uint8_t)0x03U;
+  bool is_x_valid1 = is_x_valid == 0xFFFFFFFFFFFFFFFFULL;
+  bool is_y_odd = s01 == 0x03U;
   if (!is_x_valid1)
   {
     return false;
@@ -797,14 +793,14 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
   from_mont(y, yM);
   fsqr0(yM, yM);
   uint64_t r = feq_mask(yM, y2M);
-  bool is_y_valid = r == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool is_y_valid = r == 0xFFFFFFFFFFFFFFFFULL;
   bool is_y_valid0 = is_y_valid;
   if (!is_y_valid0)
   {
     return false;
   }
-  uint64_t is_y_odd1 = y[0U] & (uint64_t)1U;
-  bool is_y_odd2 = is_y_odd1 == (uint64_t)1U;
+  uint64_t is_y_odd1 = y[0U] & 1ULL;
+  bool is_y_odd2 = is_y_odd1 == 1ULL;
   fnegate_conditional_vartime(y, is_y_odd2 != is_y_odd);
   return true;
 }
@@ -813,18 +809,18 @@ static inline void point_double(uint64_t *res, uint64_t *p)
 {
   uint64_t tmp[20U] = { 0U };
   uint64_t *x = p;
-  uint64_t *z = p + (uint32_t)8U;
+  uint64_t *z = p + 8U;
   uint64_t *x3 = res;
-  uint64_t *y3 = res + (uint32_t)4U;
-  uint64_t *z3 = res + (uint32_t)8U;
+  uint64_t *y3 = res + 4U;
+  uint64_t *z3 = res + 8U;
   uint64_t *t0 = tmp;
-  uint64_t *t1 = tmp + (uint32_t)4U;
-  uint64_t *t2 = tmp + (uint32_t)8U;
-  uint64_t *t3 = tmp + (uint32_t)12U;
-  uint64_t *t4 = tmp + (uint32_t)16U;
+  uint64_t *t1 = tmp + 4U;
+  uint64_t *t2 = tmp + 8U;
+  uint64_t *t3 = tmp + 12U;
+  uint64_t *t4 = tmp + 16U;
   uint64_t *x1 = p;
-  uint64_t *y = p + (uint32_t)4U;
-  uint64_t *z1 = p + (uint32_t)8U;
+  uint64_t *y = p + 4U;
+  uint64_t *z1 = p + 8U;
   fsqr0(t0, x1);
   fsqr0(t1, y);
   fsqr0(t2, z1);
@@ -865,22 +861,22 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
 {
   uint64_t tmp[36U] = { 0U };
   uint64_t *t0 = tmp;
-  uint64_t *t1 = tmp + (uint32_t)24U;
+  uint64_t *t1 = tmp + 24U;
   uint64_t *x3 = t1;
-  uint64_t *y3 = t1 + (uint32_t)4U;
-  uint64_t *z3 = t1 + (uint32_t)8U;
+  uint64_t *y3 = t1 + 4U;
+  uint64_t *z3 = t1 + 8U;
   uint64_t *t01 = t0;
-  uint64_t *t11 = t0 + (uint32_t)4U;
-  uint64_t *t2 = t0 + (uint32_t)8U;
-  uint64_t *t3 = t0 + (uint32_t)12U;
-  uint64_t *t4 = t0 + (uint32_t)16U;
-  uint64_t *t5 = t0 + (uint32_t)20U;
+  uint64_t *t11 = t0 + 4U;
+  uint64_t *t2 = t0 + 8U;
+  uint64_t *t3 = t0 + 12U;
+  uint64_t *t4 = t0 + 16U;
+  uint64_t *t5 = t0 + 20U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)4U;
-  uint64_t *z10 = p + (uint32_t)8U;
+  uint64_t *y1 = p + 4U;
+  uint64_t *z10 = p + 8U;
   uint64_t *x20 = q;
-  uint64_t *y20 = q + (uint32_t)4U;
-  uint64_t *z20 = q + (uint32_t)8U;
+  uint64_t *y20 = q + 4U;
+  uint64_t *z20 = q + 8U;
   fmul0(t01, x1, x20);
   fmul0(t11, y1, y20);
   fmul0(t2, z10, z20);
@@ -888,10 +884,10 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
   fadd0(t4, x20, y20);
   fmul0(t3, t3, t4);
   fadd0(t4, t01, t11);
-  uint64_t *y10 = p + (uint32_t)4U;
-  uint64_t *z11 = p + (uint32_t)8U;
-  uint64_t *y2 = q + (uint32_t)4U;
-  uint64_t *z21 = q + (uint32_t)8U;
+  uint64_t *y10 = p + 4U;
+  uint64_t *z11 = p + 8U;
+  uint64_t *y2 = q + 4U;
+  uint64_t *z21 = q + 8U;
   fsub0(t3, t3, t4);
   fadd0(t4, y10, z11);
   fadd0(t5, y2, z21);
@@ -899,9 +895,9 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
   fadd0(t5, t11, t2);
   fsub0(t4, t4, t5);
   uint64_t *x10 = p;
-  uint64_t *z1 = p + (uint32_t)8U;
+  uint64_t *z1 = p + 8U;
   uint64_t *x2 = q;
-  uint64_t *z2 = q + (uint32_t)8U;
+  uint64_t *z2 = q + 8U;
   fadd0(x3, x10, z1);
   fadd0(y3, x2, z2);
   fmul0(x3, x3, y3);
@@ -932,7 +928,7 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
   fmul0(z3, t4, z3);
   fmul0(t11, t3, t01);
   fadd0(z3, z3, t11);
-  memcpy(res, t1, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(res, t1, 12U * sizeof (uint64_t));
 }
 
 static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p)
@@ -940,41 +936,37 @@ static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p)
   uint64_t table[192U] = { 0U };
   uint64_t tmp[12U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)12U;
+  uint64_t *t1 = table + 12U;
   make_point_at_inf(t0);
-  memcpy(t1, p, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(t1, p, 12U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)12U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 12U;
     point_double(tmp, t11);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U;
+    memcpy(table + (2U * i + 2U) * 12U, tmp, 12U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 12U;
     point_add(tmp, p, t2);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t)););
+    memcpy(table + (2U * i + 3U) * 12U, tmp, 12U * sizeof (uint64_t)););
   make_point_at_inf(res);
   uint64_t tmp0[12U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
-    KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, point_double(res, res););
-    uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)12U * sizeof (uint64_t));
+    KRML_MAYBE_FOR4(i, 0U, 4U, 1U, point_double(res, res););
+    uint32_t k = 256U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 12U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)12U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 12U;
       KRML_MAYBE_FOR12(i,
-        (uint32_t)0U,
-        (uint32_t)12U,
-        (uint32_t)1U,
+        0U,
+        12U,
+        1U,
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
@@ -984,17 +976,17 @@ static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p)
 
 static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)
 {
-  memcpy(tmp, (uint64_t *)table, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(tmp, (uint64_t *)table, 12U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i0,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));
-    const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)12U;
+    0U,
+    15U,
+    1U,
+    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + 1U));
+    const uint64_t *res_j = table + (i0 + 1U) * 12U;
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint64_t *os = tmp;
       uint64_t x = (c & res_j[i]) | (~c & tmp[i]);
       os[i] = x;););
@@ -1007,64 +999,58 @@ static inline void point_mul_g(uint64_t *res, uint64_t *scalar)
   uint64_t
   q2[12U] =
     {
-      (uint64_t)1499621593102562565U, (uint64_t)16692369783039433128U,
-      (uint64_t)15337520135922861848U, (uint64_t)5455737214495366228U,
-      (uint64_t)17827017231032529600U, (uint64_t)12413621606240782649U,
-      (uint64_t)2290483008028286132U, (uint64_t)15752017553340844820U,
-      (uint64_t)4846430910634234874U, (uint64_t)10861682798464583253U,
-      (uint64_t)15404737222404363049U, (uint64_t)363586619281562022U
+      1499621593102562565ULL, 16692369783039433128ULL, 15337520135922861848ULL,
+      5455737214495366228ULL, 17827017231032529600ULL, 12413621606240782649ULL,
+      2290483008028286132ULL, 15752017553340844820ULL, 4846430910634234874ULL,
+      10861682798464583253ULL, 15404737222404363049ULL, 363586619281562022ULL
     };
   uint64_t
   q3[12U] =
     {
-      (uint64_t)14619254753077084366U, (uint64_t)13913835116514008593U,
-      (uint64_t)15060744674088488145U, (uint64_t)17668414598203068685U,
-      (uint64_t)10761169236902342334U, (uint64_t)15467027479157446221U,
-      (uint64_t)14989185522423469618U, (uint64_t)14354539272510107003U,
-      (uint64_t)14298211796392133693U, (uint64_t)13270323784253711450U,
-      (uint64_t)13380964971965046957U, (uint64_t)8686204248456909699U
+      14619254753077084366ULL, 13913835116514008593ULL, 15060744674088488145ULL,
+      17668414598203068685ULL, 10761169236902342334ULL, 15467027479157446221ULL,
+      14989185522423469618ULL, 14354539272510107003ULL, 14298211796392133693ULL,
+      13270323784253711450ULL, 13380964971965046957ULL, 8686204248456909699ULL
     };
   uint64_t
   q4[12U] =
     {
-      (uint64_t)7870395003430845958U, (uint64_t)18001862936410067720U,
-      (uint64_t)8006461232116967215U, (uint64_t)5921313779532424762U,
-      (uint64_t)10702113371959864307U, (uint64_t)8070517410642379879U,
-      (uint64_t)7139806720777708306U, (uint64_t)8253938546650739833U,
-      (uint64_t)17490482834545705718U, (uint64_t)1065249776797037500U,
-      (uint64_t)5018258455937968775U, (uint64_t)14100621120178668337U
+      7870395003430845958ULL, 18001862936410067720ULL, 8006461232116967215ULL,
+      5921313779532424762ULL, 10702113371959864307ULL, 8070517410642379879ULL,
+      7139806720777708306ULL, 8253938546650739833ULL, 17490482834545705718ULL,
+      1065249776797037500ULL, 5018258455937968775ULL, 14100621120178668337ULL
     };
   uint64_t *r1 = scalar;
-  uint64_t *r2 = scalar + (uint32_t)1U;
-  uint64_t *r3 = scalar + (uint32_t)2U;
-  uint64_t *r4 = scalar + (uint32_t)3U;
+  uint64_t *r2 = scalar + 1U;
+  uint64_t *r3 = scalar + 2U;
+  uint64_t *r4 = scalar + 3U;
   make_point_at_inf(res);
   uint64_t tmp[12U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    KRML_MAYBE_FOR4(i0, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, point_double(res, res););
-    uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);
+    0U,
+    16U,
+    1U,
+    KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, point_double(res, res););
+    uint32_t k = 64U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);
     point_add(res, res, tmp);
-    uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);
+    uint32_t k0 = 64U - 4U * i - 4U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);
     point_add(res, res, tmp);
-    uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);
+    uint32_t k1 = 64U - 4U * i - 4U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);
     point_add(res, res, tmp);
-    uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);
+    uint32_t k2 = 64U - 4U * i - 4U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);
     point_add(res, res, tmp););
-  KRML_HOST_IGNORE(q1);
-  KRML_HOST_IGNORE(q2);
-  KRML_HOST_IGNORE(q3);
-  KRML_HOST_IGNORE(q4);
+  KRML_MAYBE_UNUSED_VAR(q1);
+  KRML_MAYBE_UNUSED_VAR(q2);
+  KRML_MAYBE_UNUSED_VAR(q3);
+  KRML_MAYBE_UNUSED_VAR(q4);
 }
 
 static inline void
@@ -1075,54 +1061,48 @@ point_mul_double_g(uint64_t *res, uint64_t *scalar1, uint64_t *scalar2, uint64_t
   uint64_t table2[384U] = { 0U };
   uint64_t tmp[12U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)12U;
+  uint64_t *t1 = table2 + 12U;
   make_point_at_inf(t0);
-  memcpy(t1, q2, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(t1, q2, 12U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)12U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 12U;
     point_double(tmp, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U;
+    memcpy(table2 + (2U * i + 2U) * 12U, tmp, 12U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 12U;
     point_add(tmp, q2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 12U, tmp, 12U * sizeof (uint64_t)););
   uint64_t tmp0[12U] = { 0U };
-  uint32_t i0 = (uint32_t)255U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, i0, (uint32_t)5U);
+  uint32_t i0 = 255U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
-  const
-  uint64_t
-  *a_bits_l = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)12U;
-  memcpy(res, (uint64_t *)a_bits_l, (uint32_t)12U * sizeof (uint64_t));
-  uint32_t i1 = (uint32_t)255U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, i1, (uint32_t)5U);
+  const uint64_t *a_bits_l = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 12U;
+  memcpy(res, (uint64_t *)a_bits_l, 12U * sizeof (uint64_t));
+  uint32_t i1 = 255U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
-  const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)12U;
-  memcpy(tmp0, (uint64_t *)a_bits_l0, (uint32_t)12U * sizeof (uint64_t));
+  const uint64_t *a_bits_l0 = table2 + bits_l320 * 12U;
+  memcpy(tmp0, (uint64_t *)a_bits_l0, 12U * sizeof (uint64_t));
   point_add(res, res, tmp0);
   uint64_t tmp1[12U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++)
+  for (uint32_t i = 0U; i < 51U; i++)
   {
-    KRML_MAYBE_FOR5(i2, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, point_double(res, res););
-    uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, point_double(res, res););
+    uint32_t k = 255U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, k, 5U);
     uint32_t bits_l321 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)12U;
-    memcpy(tmp1, (uint64_t *)a_bits_l1, (uint32_t)12U * sizeof (uint64_t));
+    const uint64_t *a_bits_l1 = table2 + bits_l321 * 12U;
+    memcpy(tmp1, (uint64_t *)a_bits_l1, 12U * sizeof (uint64_t));
     point_add(res, res, tmp1);
-    uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, k0, (uint32_t)5U);
+    uint32_t k0 = 255U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, k0, 5U);
     uint32_t bits_l322 = (uint32_t)bits_l0;
     const
     uint64_t
-    *a_bits_l2 = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)12U;
-    memcpy(tmp1, (uint64_t *)a_bits_l2, (uint32_t)12U * sizeof (uint64_t));
+    *a_bits_l2 = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 12U;
+    memcpy(tmp1, (uint64_t *)a_bits_l2, 12U * sizeof (uint64_t));
     point_add(res, res, tmp1);
   }
 }
@@ -1132,7 +1112,7 @@ static inline uint64_t bn_is_lt_order_mask4(uint64_t *f)
   uint64_t tmp[4U] = { 0U };
   make_order(tmp);
   uint64_t c = bn_sub4(tmp, f, tmp);
-  return (uint64_t)0U - c;
+  return 0ULL - c;
 }
 
 static inline uint64_t bn_is_lt_order_and_gt_zero_mask4(uint64_t *f)
@@ -1161,61 +1141,61 @@ static inline void qmont_reduction(uint64_t *res, uint64_t *x)
 {
   uint64_t n[4U] = { 0U };
   make_order(n);
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t qj = (uint64_t)0xccd1c8aaee00bc4fU * x[i0];
+    0U,
+    4U,
+    1U,
+    uint64_t qj = 0xccd1c8aaee00bc4fULL * x[i0];
     uint64_t *res_j0 = x + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
     }
     uint64_t r = c;
     uint64_t c1 = r;
-    uint64_t *resb = x + (uint32_t)4U + i0;
-    uint64_t res_j = x[(uint32_t)4U + i0];
+    uint64_t *resb = x + 4U + i0;
+    uint64_t res_j = x[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););
-  memcpy(res, x + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, x + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x1;);
@@ -1224,7 +1204,7 @@ static inline void qmont_reduction(uint64_t *res, uint64_t *x)
 static inline void from_qmont(uint64_t *res, uint64_t *x)
 {
   uint64_t tmp[8U] = { 0U };
-  memcpy(tmp, x, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, x, 4U * sizeof (uint64_t));
   qmont_reduction(res, tmp);
 }
 
@@ -1246,18 +1226,18 @@ bool Hacl_Impl_P256_DH_ecp256dh_i(uint8_t *public_key, uint8_t *private_key)
 {
   uint64_t tmp[16U] = { 0U };
   uint64_t *sk = tmp;
-  uint64_t *pk = tmp + (uint32_t)4U;
+  uint64_t *pk = tmp + 4U;
   bn_from_bytes_be4(sk, private_key);
   uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(sk);
   uint64_t oneq[4U] = { 0U };
-  oneq[0U] = (uint64_t)1U;
-  oneq[1U] = (uint64_t)0U;
-  oneq[2U] = (uint64_t)0U;
-  oneq[3U] = (uint64_t)0U;
+  oneq[0U] = 1ULL;
+  oneq[1U] = 0ULL;
+  oneq[2U] = 0ULL;
+  oneq[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -1265,7 +1245,7 @@ bool Hacl_Impl_P256_DH_ecp256dh_i(uint8_t *public_key, uint8_t *private_key)
   uint64_t is_sk_valid = is_b_valid;
   point_mul_g(pk, sk);
   point_store(public_key, pk);
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 bool
@@ -1277,19 +1257,19 @@ Hacl_Impl_P256_DH_ecp256dh_r(
 {
   uint64_t tmp[16U] = { 0U };
   uint64_t *sk = tmp;
-  uint64_t *pk = tmp + (uint32_t)4U;
+  uint64_t *pk = tmp + 4U;
   bool is_pk_valid = load_point_vartime(pk, their_pubkey);
   bn_from_bytes_be4(sk, private_key);
   uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(sk);
   uint64_t oneq[4U] = { 0U };
-  oneq[0U] = (uint64_t)1U;
-  oneq[1U] = (uint64_t)0U;
-  oneq[2U] = (uint64_t)0U;
-  oneq[3U] = (uint64_t)0U;
+  oneq[0U] = 1ULL;
+  oneq[1U] = 0ULL;
+  oneq[2U] = 0ULL;
+  oneq[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -1301,27 +1281,27 @@ Hacl_Impl_P256_DH_ecp256dh_r(
     point_mul(ss_proj, sk, pk);
     point_store(shared_secret, ss_proj);
   }
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU && is_pk_valid;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL && is_pk_valid;
 }
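Both ecp256dh_i and ecp256dh_r above sanitize the decoded scalar with the same branch-free select: when bn_is_lt_order_and_gt_zero_mask4 returns the all-ones mask the scalar is kept, otherwise it is silently replaced by the constant 1 so that point_mul_g / point_mul still run on well-formed input, and only the boolean return value reveals the failure. A minimal sketch of that select idiom, assuming the mask is canonical (all-ones or all-zeros); ct_select_u64 is an illustrative helper, not a library function:

    #include <stdint.h>

    /* Returns a when mask == 0xFFFFFFFFFFFFFFFF, b when mask == 0,
       with no secret-dependent branch.  Matches the pattern
       x = oneq[i] ^ (is_b_valid & (sk[i] ^ oneq[i])) used above. */
    static inline uint64_t ct_select_u64(uint64_t mask, uint64_t a, uint64_t b)
    {
      return b ^ (mask & (a ^ b));
    }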
 
 static inline void qinv(uint64_t *res, uint64_t *r)
 {
   uint64_t tmp[28U] = { 0U };
   uint64_t *x6 = tmp;
-  uint64_t *x_11 = tmp + (uint32_t)4U;
-  uint64_t *x_101 = tmp + (uint32_t)8U;
-  uint64_t *x_111 = tmp + (uint32_t)12U;
-  uint64_t *x_1111 = tmp + (uint32_t)16U;
-  uint64_t *x_10101 = tmp + (uint32_t)20U;
-  uint64_t *x_101111 = tmp + (uint32_t)24U;
-  memcpy(x6, r, (uint32_t)4U * sizeof (uint64_t));
+  uint64_t *x_11 = tmp + 4U;
+  uint64_t *x_101 = tmp + 8U;
+  uint64_t *x_111 = tmp + 12U;
+  uint64_t *x_1111 = tmp + 16U;
+  uint64_t *x_10101 = tmp + 20U;
+  uint64_t *x_101111 = tmp + 24U;
+  memcpy(x6, r, 4U * sizeof (uint64_t));
   {
     qsqr(x6, x6);
   }
   qmul(x_11, x6, r);
   qmul(x_101, x6, x_11);
   qmul(x_111, x6, x_101);
-  memcpy(x6, x_101, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x6, x_101, 4U * sizeof (uint64_t));
   {
     qsqr(x6, x6);
   }
@@ -1330,86 +1310,86 @@ static inline void qinv(uint64_t *res, uint64_t *r)
     qsqr(x6, x6);
   }
   qmul(x_10101, x6, r);
-  memcpy(x6, x_10101, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x6, x_10101, 4U * sizeof (uint64_t));
   {
     qsqr(x6, x6);
   }
   qmul(x_101111, x_101, x6);
   qmul(x6, x_10101, x6);
   uint64_t tmp1[4U] = { 0U };
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(x6, x6););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(x6, x6););
   qmul(x6, x6, x_11);
-  memcpy(tmp1, x6, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  memcpy(tmp1, x6, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x6);
-  memcpy(x6, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR16(i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, qsqr(x6, x6););
+  memcpy(x6, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, qsqr(x6, x6););
   qmul(x6, x6, tmp1);
-  memcpy(tmp1, x6, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memcpy(tmp1, x6, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     qsqr(tmp1, tmp1);
   }
   qmul(tmp1, tmp1, x6);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     qsqr(tmp1, tmp1);
   }
   qmul(tmp1, tmp1, x6);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_10101);
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR9(i, (uint32_t)0U, (uint32_t)9U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR9(i, 0U, 9U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101111);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, r);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, r);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR10(i, (uint32_t)0U, (uint32_t)10U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR10(i, 0U, 10U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101111);
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, r);
-  KRML_MAYBE_FOR7(i, (uint32_t)0U, (uint32_t)7U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR7(i, 0U, 7U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_10101);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  memcpy(x6, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  memcpy(res, x6, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x6, tmp1, 4U * sizeof (uint64_t));
+  memcpy(res, x6, 4U * sizeof (uint64_t));
 }
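The long qsqr/qmul chain above is straight-line code for a fixed exponentiation: it effectively raises r to the constant q - 2 modulo the P-256 group order q, and since q is prime, Fermat's little theorem makes that r's inverse. A generic (and slower) equivalent built from the same primitives would be a plain left-to-right square-and-multiply; the sketch below is illustrative only, reusing qsqr(out, in) and qmul(out, a, b) as they are called above and assuming exp[] holds q - 2 as four little-endian 64-bit limbs with its top bit set:

    static void qinv_generic(uint64_t *res, uint64_t *r, const uint64_t exp[4U])
    {
      uint64_t acc[4U] = { 0U };
      memcpy(acc, r, 4U * sizeof (uint64_t));   /* top bit of exp is set, so seed with r */
      for (int i = 254; i >= 0; i--)
      {
        qsqr(acc, acc);
        if ((exp[i / 64] >> (i % 64)) & 1ULL)
        {
          qmul(acc, acc, r);
        }
      }
      memcpy(res, acc, 4U * sizeof (uint64_t));
    }

The unrolled chain above trades this loop for straight-line code specialized to the one public constant exponent.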
 
 static inline void qmul_mont(uint64_t *sinv, uint64_t *b, uint64_t *res)
@@ -1429,20 +1409,16 @@ ecdsa_verify_msg_as_qelem(
 {
   uint64_t tmp[28U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *r_q = tmp + (uint32_t)12U;
-  uint64_t *s_q = tmp + (uint32_t)16U;
-  uint64_t *u1 = tmp + (uint32_t)20U;
-  uint64_t *u2 = tmp + (uint32_t)24U;
+  uint64_t *r_q = tmp + 12U;
+  uint64_t *s_q = tmp + 16U;
+  uint64_t *u1 = tmp + 20U;
+  uint64_t *u2 = tmp + 24U;
   bool is_pk_valid = load_point_vartime(pk, public_key);
   bn_from_bytes_be4(r_q, signature_r);
   bn_from_bytes_be4(s_q, signature_s);
   uint64_t is_r_valid = bn_is_lt_order_and_gt_zero_mask4(r_q);
   uint64_t is_s_valid = bn_is_lt_order_and_gt_zero_mask4(s_q);
-  bool
-  is_rs_valid =
-    is_r_valid
-    == (uint64_t)0xFFFFFFFFFFFFFFFFU
-    && is_s_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool is_rs_valid = is_r_valid == 0xFFFFFFFFFFFFFFFFULL && is_s_valid == 0xFFFFFFFFFFFFFFFFULL;
   if (!(is_pk_valid && is_rs_valid))
   {
     return false;
@@ -1474,20 +1450,20 @@ ecdsa_sign_msg_as_qelem(
 {
   uint64_t rsdk_q[16U] = { 0U };
   uint64_t *r_q = rsdk_q;
-  uint64_t *s_q = rsdk_q + (uint32_t)4U;
-  uint64_t *d_a = rsdk_q + (uint32_t)8U;
-  uint64_t *k_q = rsdk_q + (uint32_t)12U;
+  uint64_t *s_q = rsdk_q + 4U;
+  uint64_t *d_a = rsdk_q + 8U;
+  uint64_t *k_q = rsdk_q + 12U;
   bn_from_bytes_be4(d_a, private_key);
   uint64_t is_b_valid0 = bn_is_lt_order_and_gt_zero_mask4(d_a);
   uint64_t oneq0[4U] = { 0U };
-  oneq0[0U] = (uint64_t)1U;
-  oneq0[1U] = (uint64_t)0U;
-  oneq0[2U] = (uint64_t)0U;
-  oneq0[3U] = (uint64_t)0U;
+  oneq0[0U] = 1ULL;
+  oneq0[1U] = 0ULL;
+  oneq0[2U] = 0ULL;
+  oneq0[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = d_a;
     uint64_t uu____0 = oneq0[i];
     uint64_t x = uu____0 ^ (is_b_valid0 & (d_a[i] ^ uu____0));
@@ -1496,14 +1472,14 @@ ecdsa_sign_msg_as_qelem(
   bn_from_bytes_be4(k_q, nonce);
   uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(k_q);
   uint64_t oneq[4U] = { 0U };
-  oneq[0U] = (uint64_t)1U;
-  oneq[1U] = (uint64_t)0U;
-  oneq[2U] = (uint64_t)0U;
-  oneq[3U] = (uint64_t)0U;
+  oneq[0U] = 1ULL;
+  oneq[1U] = 0ULL;
+  oneq[2U] = 0ULL;
+  oneq[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = k_q;
     uint64_t uu____1 = oneq[i];
     uint64_t x = uu____1 ^ (is_b_valid & (k_q[i] ^ uu____1));
@@ -1524,7 +1500,7 @@ ecdsa_sign_msg_as_qelem(
   uint64_t is_r_zero = bn_is_zero_mask4(r_q);
   uint64_t is_s_zero = bn_is_zero_mask4(s_q);
   uint64_t m = are_sk_nonce_valid & (~is_r_zero & ~is_s_zero);
-  bool res = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool res = m == 0xFFFFFFFFFFFFFFFFULL;
   return res;
 }
 
@@ -1571,8 +1547,8 @@ Hacl_P256_ecdsa_sign_p256_sha2(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
-  Hacl_Streaming_SHA2_hash_256(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  Hacl_Hash_SHA2_hash_256(mHash, msg, msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
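As this hunk (and the sha384/sha512 and RSAPSS hunks below) shows, the one-shot SHA-2 helpers are renamed from Hacl_Streaming_SHA2_* to Hacl_Hash_SHA2_* and the digest output moves from the last to the first parameter. Call sites migrating with this patch change accordingly, e.g.:

    /* before */  Hacl_Streaming_SHA2_hash_256(msg, msg_len, mHash);
    /* after  */  Hacl_Hash_SHA2_hash_256(mHash, msg, msg_len);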
@@ -1604,8 +1580,8 @@ Hacl_P256_ecdsa_sign_p256_sha384(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[48U] = { 0U };
-  Hacl_Streaming_SHA2_hash_384(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  Hacl_Hash_SHA2_hash_384(mHash, msg, msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1637,8 +1613,8 @@ Hacl_P256_ecdsa_sign_p256_sha512(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[64U] = { 0U };
-  Hacl_Streaming_SHA2_hash_512(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  Hacl_Hash_SHA2_hash_512(mHash, msg, msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1680,8 +1656,8 @@ Hacl_P256_ecdsa_sign_p256_without_hash(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
-  memcpy(mHash, msg, (uint32_t)32U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(msg_len);
+  memcpy(mHash, msg, 32U * sizeof (uint8_t));
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1716,8 +1692,8 @@ Hacl_P256_ecdsa_verif_p256_sha2(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
-  Hacl_Streaming_SHA2_hash_256(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  Hacl_Hash_SHA2_hash_256(mHash, msg, msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1747,8 +1723,8 @@ Hacl_P256_ecdsa_verif_p256_sha384(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[48U] = { 0U };
-  Hacl_Streaming_SHA2_hash_384(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  Hacl_Hash_SHA2_hash_384(mHash, msg, msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1778,8 +1754,8 @@ Hacl_P256_ecdsa_verif_p256_sha512(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[64U] = { 0U };
-  Hacl_Streaming_SHA2_hash_512(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  Hacl_Hash_SHA2_hash_512(mHash, msg, msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1814,8 +1790,8 @@ Hacl_P256_ecdsa_verif_without_hash(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
-  memcpy(mHash, msg, (uint32_t)32U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(msg_len);
+  memcpy(mHash, msg, 32U * sizeof (uint8_t));
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1864,7 +1840,7 @@ bool Hacl_P256_validate_private_key(uint8_t *private_key)
   uint64_t bn_sk[4U] = { 0U };
   bn_from_bytes_be4(bn_sk, private_key);
   uint64_t res = bn_is_lt_order_and_gt_zero_mask4(bn_sk);
-  return res == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return res == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /*******************************************************************************
@@ -1893,11 +1869,11 @@ Convert a public key from uncompressed to its raw form.
 bool Hacl_P256_uncompressed_to_raw(uint8_t *pk, uint8_t *pk_raw)
 {
   uint8_t pk0 = pk[0U];
-  if (pk0 != (uint8_t)0x04U)
+  if (pk0 != 0x04U)
   {
     return false;
   }
-  memcpy(pk_raw, pk + (uint32_t)1U, (uint32_t)64U * sizeof (uint8_t));
+  memcpy(pk_raw, pk + 1U, 64U * sizeof (uint8_t));
   return true;
 }
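A minimal call-site sketch for the helper above; the buffer sizes are inferred from the 0x04 prefix check and the 64-byte copy in the function body, not stated explicitly in this hunk:

    uint8_t pk[65U];     /* 0x04 || X || Y, e.g. as received from a peer */
    uint8_t pk_raw[64U]; /* X || Y on success */
    /* ... fill pk ... */
    if (!Hacl_P256_uncompressed_to_raw(pk, pk_raw))
    {
      /* first byte was not 0x04: not an uncompressed point encoding */
    }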
 
@@ -1915,12 +1891,12 @@ bool Hacl_P256_compressed_to_raw(uint8_t *pk, uint8_t *pk_raw)
 {
   uint64_t xa[4U] = { 0U };
   uint64_t ya[4U] = { 0U };
-  uint8_t *pk_xb = pk + (uint32_t)1U;
+  uint8_t *pk_xb = pk + 1U;
   bool b = aff_point_decompress_vartime(xa, ya, pk);
   if (b)
   {
-    memcpy(pk_raw, pk_xb, (uint32_t)32U * sizeof (uint8_t));
-    bn_to_bytes_be4(pk_raw + (uint32_t)32U, ya);
+    memcpy(pk_raw, pk_xb, 32U * sizeof (uint8_t));
+    bn_to_bytes_be4(pk_raw + 32U, ya);
   }
   return b;
 }
@@ -1935,8 +1911,8 @@ Convert a public key from raw to its uncompressed form.
 */
 void Hacl_P256_raw_to_uncompressed(uint8_t *pk_raw, uint8_t *pk)
 {
-  pk[0U] = (uint8_t)0x04U;
-  memcpy(pk + (uint32_t)1U, pk_raw, (uint32_t)64U * sizeof (uint8_t));
+  pk[0U] = 0x04U;
+  memcpy(pk + 1U, pk_raw, 64U * sizeof (uint8_t));
 }
 
 /**
@@ -1950,12 +1926,12 @@ Convert a public key from raw to its compressed form.
 void Hacl_P256_raw_to_compressed(uint8_t *pk_raw, uint8_t *pk)
 {
   uint8_t *pk_x = pk_raw;
-  uint8_t *pk_y = pk_raw + (uint32_t)32U;
+  uint8_t *pk_y = pk_raw + 32U;
   uint64_t bn_f[4U] = { 0U };
   bn_from_bytes_be4(bn_f, pk_y);
-  uint64_t is_odd_f = bn_f[0U] & (uint64_t)1U;
-  pk[0U] = (uint8_t)is_odd_f + (uint8_t)0x02U;
-  memcpy(pk + (uint32_t)1U, pk_x, (uint32_t)32U * sizeof (uint8_t));
+  uint64_t is_odd_f = bn_f[0U] & 1ULL;
+  pk[0U] = (uint32_t)(uint8_t)is_odd_f + 0x02U;
+  memcpy(pk + 1U, pk_x, 32U * sizeof (uint8_t));
 }
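Taken together with compressed_to_raw above, the compressed encoding produced here is a one-byte tag (0x02 for even Y, 0x03 for odd Y, per the is_odd_f + 0x02 line) followed by the 32-byte X coordinate. An illustrative round trip, with buffer sizes inferred from the copies in the two functions:

    uint8_t raw[64U];   /* X || Y */
    uint8_t comp[33U];  /* (0x02 | parity of Y) || X */
    /* ... fill raw ... */
    Hacl_P256_raw_to_compressed(raw, comp);
    uint8_t raw2[64U];
    bool ok = Hacl_P256_compressed_to_raw(comp, raw2); /* recovers Y from X and the parity tag */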
 
 
diff --git a/src/Hacl_Poly1305_32.c b/src/Hacl_Poly1305_32.c
deleted file mode 100644
index 5192559b..00000000
--- a/src/Hacl_Poly1305_32.c
+++ /dev/null
@@ -1,572 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Poly1305_32.h"
-
-void Hacl_Poly1305_32_poly1305_init(uint64_t *ctx, uint8_t *key)
-{
-  uint64_t *acc = ctx;
-  uint64_t *pre = ctx + (uint32_t)5U;
-  uint8_t *kr = key;
-  acc[0U] = (uint64_t)0U;
-  acc[1U] = (uint64_t)0U;
-  acc[2U] = (uint64_t)0U;
-  acc[3U] = (uint64_t)0U;
-  acc[4U] = (uint64_t)0U;
-  uint64_t u0 = load64_le(kr);
-  uint64_t lo = u0;
-  uint64_t u = load64_le(kr + (uint32_t)8U);
-  uint64_t hi = u;
-  uint64_t mask0 = (uint64_t)0x0ffffffc0fffffffU;
-  uint64_t mask1 = (uint64_t)0x0ffffffc0ffffffcU;
-  uint64_t lo1 = lo & mask0;
-  uint64_t hi1 = hi & mask1;
-  uint64_t *r = pre;
-  uint64_t *r5 = pre + (uint32_t)5U;
-  uint64_t *rn = pre + (uint32_t)10U;
-  uint64_t *rn_5 = pre + (uint32_t)15U;
-  uint64_t r_vec0 = lo1;
-  uint64_t r_vec1 = hi1;
-  uint64_t f00 = r_vec0 & (uint64_t)0x3ffffffU;
-  uint64_t f10 = r_vec0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-  uint64_t f20 = r_vec0 >> (uint32_t)52U | (r_vec1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-  uint64_t f30 = r_vec1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-  uint64_t f40 = r_vec1 >> (uint32_t)40U;
-  uint64_t f0 = f00;
-  uint64_t f1 = f10;
-  uint64_t f2 = f20;
-  uint64_t f3 = f30;
-  uint64_t f4 = f40;
-  r[0U] = f0;
-  r[1U] = f1;
-  r[2U] = f2;
-  r[3U] = f3;
-  r[4U] = f4;
-  uint64_t f200 = r[0U];
-  uint64_t f21 = r[1U];
-  uint64_t f22 = r[2U];
-  uint64_t f23 = r[3U];
-  uint64_t f24 = r[4U];
-  r5[0U] = f200 * (uint64_t)5U;
-  r5[1U] = f21 * (uint64_t)5U;
-  r5[2U] = f22 * (uint64_t)5U;
-  r5[3U] = f23 * (uint64_t)5U;
-  r5[4U] = f24 * (uint64_t)5U;
-  rn[0U] = r[0U];
-  rn[1U] = r[1U];
-  rn[2U] = r[2U];
-  rn[3U] = r[3U];
-  rn[4U] = r[4U];
-  rn_5[0U] = r5[0U];
-  rn_5[1U] = r5[1U];
-  rn_5[2U] = r5[2U];
-  rn_5[3U] = r5[3U];
-  rn_5[4U] = r5[4U];
-}
-
-void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text)
-{
-  uint64_t *pre = ctx + (uint32_t)5U;
-  uint64_t *acc = ctx;
-  uint64_t e[5U] = { 0U };
-  uint64_t u0 = load64_le(text);
-  uint64_t lo = u0;
-  uint64_t u = load64_le(text + (uint32_t)8U);
-  uint64_t hi = u;
-  uint64_t f0 = lo;
-  uint64_t f1 = hi;
-  uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-  uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-  uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-  uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-  uint64_t f40 = f1 >> (uint32_t)40U;
-  uint64_t f01 = f010;
-  uint64_t f111 = f110;
-  uint64_t f2 = f20;
-  uint64_t f3 = f30;
-  uint64_t f41 = f40;
-  e[0U] = f01;
-  e[1U] = f111;
-  e[2U] = f2;
-  e[3U] = f3;
-  e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
-  uint64_t mask = b;
-  uint64_t f4 = e[4U];
-  e[4U] = f4 | mask;
-  uint64_t *r = pre;
-  uint64_t *r5 = pre + (uint32_t)5U;
-  uint64_t r0 = r[0U];
-  uint64_t r1 = r[1U];
-  uint64_t r2 = r[2U];
-  uint64_t r3 = r[3U];
-  uint64_t r4 = r[4U];
-  uint64_t r51 = r5[1U];
-  uint64_t r52 = r5[2U];
-  uint64_t r53 = r5[3U];
-  uint64_t r54 = r5[4U];
-  uint64_t f10 = e[0U];
-  uint64_t f11 = e[1U];
-  uint64_t f12 = e[2U];
-  uint64_t f13 = e[3U];
-  uint64_t f14 = e[4U];
-  uint64_t a0 = acc[0U];
-  uint64_t a1 = acc[1U];
-  uint64_t a2 = acc[2U];
-  uint64_t a3 = acc[3U];
-  uint64_t a4 = acc[4U];
-  uint64_t a01 = a0 + f10;
-  uint64_t a11 = a1 + f11;
-  uint64_t a21 = a2 + f12;
-  uint64_t a31 = a3 + f13;
-  uint64_t a41 = a4 + f14;
-  uint64_t a02 = r0 * a01;
-  uint64_t a12 = r1 * a01;
-  uint64_t a22 = r2 * a01;
-  uint64_t a32 = r3 * a01;
-  uint64_t a42 = r4 * a01;
-  uint64_t a03 = a02 + r54 * a11;
-  uint64_t a13 = a12 + r0 * a11;
-  uint64_t a23 = a22 + r1 * a11;
-  uint64_t a33 = a32 + r2 * a11;
-  uint64_t a43 = a42 + r3 * a11;
-  uint64_t a04 = a03 + r53 * a21;
-  uint64_t a14 = a13 + r54 * a21;
-  uint64_t a24 = a23 + r0 * a21;
-  uint64_t a34 = a33 + r1 * a21;
-  uint64_t a44 = a43 + r2 * a21;
-  uint64_t a05 = a04 + r52 * a31;
-  uint64_t a15 = a14 + r53 * a31;
-  uint64_t a25 = a24 + r54 * a31;
-  uint64_t a35 = a34 + r0 * a31;
-  uint64_t a45 = a44 + r1 * a31;
-  uint64_t a06 = a05 + r51 * a41;
-  uint64_t a16 = a15 + r52 * a41;
-  uint64_t a26 = a25 + r53 * a41;
-  uint64_t a36 = a35 + r54 * a41;
-  uint64_t a46 = a45 + r0 * a41;
-  uint64_t t0 = a06;
-  uint64_t t1 = a16;
-  uint64_t t2 = a26;
-  uint64_t t3 = a36;
-  uint64_t t4 = a46;
-  uint64_t mask26 = (uint64_t)0x3ffffffU;
-  uint64_t z0 = t0 >> (uint32_t)26U;
-  uint64_t z1 = t3 >> (uint32_t)26U;
-  uint64_t x0 = t0 & mask26;
-  uint64_t x3 = t3 & mask26;
-  uint64_t x1 = t1 + z0;
-  uint64_t x4 = t4 + z1;
-  uint64_t z01 = x1 >> (uint32_t)26U;
-  uint64_t z11 = x4 >> (uint32_t)26U;
-  uint64_t t = z11 << (uint32_t)2U;
-  uint64_t z12 = z11 + t;
-  uint64_t x11 = x1 & mask26;
-  uint64_t x41 = x4 & mask26;
-  uint64_t x2 = t2 + z01;
-  uint64_t x01 = x0 + z12;
-  uint64_t z02 = x2 >> (uint32_t)26U;
-  uint64_t z13 = x01 >> (uint32_t)26U;
-  uint64_t x21 = x2 & mask26;
-  uint64_t x02 = x01 & mask26;
-  uint64_t x31 = x3 + z02;
-  uint64_t x12 = x11 + z13;
-  uint64_t z03 = x31 >> (uint32_t)26U;
-  uint64_t x32 = x31 & mask26;
-  uint64_t x42 = x41 + z03;
-  uint64_t o0 = x02;
-  uint64_t o1 = x12;
-  uint64_t o2 = x21;
-  uint64_t o3 = x32;
-  uint64_t o4 = x42;
-  acc[0U] = o0;
-  acc[1U] = o1;
-  acc[2U] = o2;
-  acc[3U] = o3;
-  acc[4U] = o4;
-}
-
-void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
-{
-  uint64_t *pre = ctx + (uint32_t)5U;
-  uint64_t *acc = ctx;
-  uint32_t nb = len / (uint32_t)16U;
-  uint32_t rem = len % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
-  {
-    uint8_t *block = text + i * (uint32_t)16U;
-    uint64_t e[5U] = { 0U };
-    uint64_t u0 = load64_le(block);
-    uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
-    uint64_t hi = u;
-    uint64_t f0 = lo;
-    uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
-    uint64_t f01 = f010;
-    uint64_t f111 = f110;
-    uint64_t f2 = f20;
-    uint64_t f3 = f30;
-    uint64_t f41 = f40;
-    e[0U] = f01;
-    e[1U] = f111;
-    e[2U] = f2;
-    e[3U] = f3;
-    e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
-    uint64_t mask = b;
-    uint64_t f4 = e[4U];
-    e[4U] = f4 | mask;
-    uint64_t *r = pre;
-    uint64_t *r5 = pre + (uint32_t)5U;
-    uint64_t r0 = r[0U];
-    uint64_t r1 = r[1U];
-    uint64_t r2 = r[2U];
-    uint64_t r3 = r[3U];
-    uint64_t r4 = r[4U];
-    uint64_t r51 = r5[1U];
-    uint64_t r52 = r5[2U];
-    uint64_t r53 = r5[3U];
-    uint64_t r54 = r5[4U];
-    uint64_t f10 = e[0U];
-    uint64_t f11 = e[1U];
-    uint64_t f12 = e[2U];
-    uint64_t f13 = e[3U];
-    uint64_t f14 = e[4U];
-    uint64_t a0 = acc[0U];
-    uint64_t a1 = acc[1U];
-    uint64_t a2 = acc[2U];
-    uint64_t a3 = acc[3U];
-    uint64_t a4 = acc[4U];
-    uint64_t a01 = a0 + f10;
-    uint64_t a11 = a1 + f11;
-    uint64_t a21 = a2 + f12;
-    uint64_t a31 = a3 + f13;
-    uint64_t a41 = a4 + f14;
-    uint64_t a02 = r0 * a01;
-    uint64_t a12 = r1 * a01;
-    uint64_t a22 = r2 * a01;
-    uint64_t a32 = r3 * a01;
-    uint64_t a42 = r4 * a01;
-    uint64_t a03 = a02 + r54 * a11;
-    uint64_t a13 = a12 + r0 * a11;
-    uint64_t a23 = a22 + r1 * a11;
-    uint64_t a33 = a32 + r2 * a11;
-    uint64_t a43 = a42 + r3 * a11;
-    uint64_t a04 = a03 + r53 * a21;
-    uint64_t a14 = a13 + r54 * a21;
-    uint64_t a24 = a23 + r0 * a21;
-    uint64_t a34 = a33 + r1 * a21;
-    uint64_t a44 = a43 + r2 * a21;
-    uint64_t a05 = a04 + r52 * a31;
-    uint64_t a15 = a14 + r53 * a31;
-    uint64_t a25 = a24 + r54 * a31;
-    uint64_t a35 = a34 + r0 * a31;
-    uint64_t a45 = a44 + r1 * a31;
-    uint64_t a06 = a05 + r51 * a41;
-    uint64_t a16 = a15 + r52 * a41;
-    uint64_t a26 = a25 + r53 * a41;
-    uint64_t a36 = a35 + r54 * a41;
-    uint64_t a46 = a45 + r0 * a41;
-    uint64_t t0 = a06;
-    uint64_t t1 = a16;
-    uint64_t t2 = a26;
-    uint64_t t3 = a36;
-    uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
-    uint64_t x0 = t0 & mask26;
-    uint64_t x3 = t3 & mask26;
-    uint64_t x1 = t1 + z0;
-    uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
-    uint64_t z12 = z11 + t;
-    uint64_t x11 = x1 & mask26;
-    uint64_t x41 = x4 & mask26;
-    uint64_t x2 = t2 + z01;
-    uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
-    uint64_t x21 = x2 & mask26;
-    uint64_t x02 = x01 & mask26;
-    uint64_t x31 = x3 + z02;
-    uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
-    uint64_t x32 = x31 & mask26;
-    uint64_t x42 = x41 + z03;
-    uint64_t o0 = x02;
-    uint64_t o1 = x12;
-    uint64_t o2 = x21;
-    uint64_t o3 = x32;
-    uint64_t o4 = x42;
-    acc[0U] = o0;
-    acc[1U] = o1;
-    acc[2U] = o2;
-    acc[3U] = o3;
-    acc[4U] = o4;
-  }
-  if (rem > (uint32_t)0U)
-  {
-    uint8_t *last = text + nb * (uint32_t)16U;
-    uint64_t e[5U] = { 0U };
-    uint8_t tmp[16U] = { 0U };
-    memcpy(tmp, last, rem * sizeof (uint8_t));
-    uint64_t u0 = load64_le(tmp);
-    uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
-    uint64_t hi = u;
-    uint64_t f0 = lo;
-    uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
-    uint64_t f01 = f010;
-    uint64_t f111 = f110;
-    uint64_t f2 = f20;
-    uint64_t f3 = f30;
-    uint64_t f4 = f40;
-    e[0U] = f01;
-    e[1U] = f111;
-    e[2U] = f2;
-    e[3U] = f3;
-    e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
-    uint64_t mask = b;
-    uint64_t fi = e[rem * (uint32_t)8U / (uint32_t)26U];
-    e[rem * (uint32_t)8U / (uint32_t)26U] = fi | mask;
-    uint64_t *r = pre;
-    uint64_t *r5 = pre + (uint32_t)5U;
-    uint64_t r0 = r[0U];
-    uint64_t r1 = r[1U];
-    uint64_t r2 = r[2U];
-    uint64_t r3 = r[3U];
-    uint64_t r4 = r[4U];
-    uint64_t r51 = r5[1U];
-    uint64_t r52 = r5[2U];
-    uint64_t r53 = r5[3U];
-    uint64_t r54 = r5[4U];
-    uint64_t f10 = e[0U];
-    uint64_t f11 = e[1U];
-    uint64_t f12 = e[2U];
-    uint64_t f13 = e[3U];
-    uint64_t f14 = e[4U];
-    uint64_t a0 = acc[0U];
-    uint64_t a1 = acc[1U];
-    uint64_t a2 = acc[2U];
-    uint64_t a3 = acc[3U];
-    uint64_t a4 = acc[4U];
-    uint64_t a01 = a0 + f10;
-    uint64_t a11 = a1 + f11;
-    uint64_t a21 = a2 + f12;
-    uint64_t a31 = a3 + f13;
-    uint64_t a41 = a4 + f14;
-    uint64_t a02 = r0 * a01;
-    uint64_t a12 = r1 * a01;
-    uint64_t a22 = r2 * a01;
-    uint64_t a32 = r3 * a01;
-    uint64_t a42 = r4 * a01;
-    uint64_t a03 = a02 + r54 * a11;
-    uint64_t a13 = a12 + r0 * a11;
-    uint64_t a23 = a22 + r1 * a11;
-    uint64_t a33 = a32 + r2 * a11;
-    uint64_t a43 = a42 + r3 * a11;
-    uint64_t a04 = a03 + r53 * a21;
-    uint64_t a14 = a13 + r54 * a21;
-    uint64_t a24 = a23 + r0 * a21;
-    uint64_t a34 = a33 + r1 * a21;
-    uint64_t a44 = a43 + r2 * a21;
-    uint64_t a05 = a04 + r52 * a31;
-    uint64_t a15 = a14 + r53 * a31;
-    uint64_t a25 = a24 + r54 * a31;
-    uint64_t a35 = a34 + r0 * a31;
-    uint64_t a45 = a44 + r1 * a31;
-    uint64_t a06 = a05 + r51 * a41;
-    uint64_t a16 = a15 + r52 * a41;
-    uint64_t a26 = a25 + r53 * a41;
-    uint64_t a36 = a35 + r54 * a41;
-    uint64_t a46 = a45 + r0 * a41;
-    uint64_t t0 = a06;
-    uint64_t t1 = a16;
-    uint64_t t2 = a26;
-    uint64_t t3 = a36;
-    uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
-    uint64_t x0 = t0 & mask26;
-    uint64_t x3 = t3 & mask26;
-    uint64_t x1 = t1 + z0;
-    uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
-    uint64_t z12 = z11 + t;
-    uint64_t x11 = x1 & mask26;
-    uint64_t x41 = x4 & mask26;
-    uint64_t x2 = t2 + z01;
-    uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
-    uint64_t x21 = x2 & mask26;
-    uint64_t x02 = x01 & mask26;
-    uint64_t x31 = x3 + z02;
-    uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
-    uint64_t x32 = x31 & mask26;
-    uint64_t x42 = x41 + z03;
-    uint64_t o0 = x02;
-    uint64_t o1 = x12;
-    uint64_t o2 = x21;
-    uint64_t o3 = x32;
-    uint64_t o4 = x42;
-    acc[0U] = o0;
-    acc[1U] = o1;
-    acc[2U] = o2;
-    acc[3U] = o3;
-    acc[4U] = o4;
-    return;
-  }
-}
-
-void Hacl_Poly1305_32_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx)
-{
-  uint64_t *acc = ctx;
-  uint8_t *ks = key + (uint32_t)16U;
-  uint64_t f0 = acc[0U];
-  uint64_t f13 = acc[1U];
-  uint64_t f23 = acc[2U];
-  uint64_t f33 = acc[3U];
-  uint64_t f40 = acc[4U];
-  uint64_t l0 = f0 + (uint64_t)0U;
-  uint64_t tmp00 = l0 & (uint64_t)0x3ffffffU;
-  uint64_t c00 = l0 >> (uint32_t)26U;
-  uint64_t l1 = f13 + c00;
-  uint64_t tmp10 = l1 & (uint64_t)0x3ffffffU;
-  uint64_t c10 = l1 >> (uint32_t)26U;
-  uint64_t l2 = f23 + c10;
-  uint64_t tmp20 = l2 & (uint64_t)0x3ffffffU;
-  uint64_t c20 = l2 >> (uint32_t)26U;
-  uint64_t l3 = f33 + c20;
-  uint64_t tmp30 = l3 & (uint64_t)0x3ffffffU;
-  uint64_t c30 = l3 >> (uint32_t)26U;
-  uint64_t l4 = f40 + c30;
-  uint64_t tmp40 = l4 & (uint64_t)0x3ffffffU;
-  uint64_t c40 = l4 >> (uint32_t)26U;
-  uint64_t f010 = tmp00 + c40 * (uint64_t)5U;
-  uint64_t f110 = tmp10;
-  uint64_t f210 = tmp20;
-  uint64_t f310 = tmp30;
-  uint64_t f410 = tmp40;
-  uint64_t l = f010 + (uint64_t)0U;
-  uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
-  uint64_t c0 = l >> (uint32_t)26U;
-  uint64_t l5 = f110 + c0;
-  uint64_t tmp1 = l5 & (uint64_t)0x3ffffffU;
-  uint64_t c1 = l5 >> (uint32_t)26U;
-  uint64_t l6 = f210 + c1;
-  uint64_t tmp2 = l6 & (uint64_t)0x3ffffffU;
-  uint64_t c2 = l6 >> (uint32_t)26U;
-  uint64_t l7 = f310 + c2;
-  uint64_t tmp3 = l7 & (uint64_t)0x3ffffffU;
-  uint64_t c3 = l7 >> (uint32_t)26U;
-  uint64_t l8 = f410 + c3;
-  uint64_t tmp4 = l8 & (uint64_t)0x3ffffffU;
-  uint64_t c4 = l8 >> (uint32_t)26U;
-  uint64_t f02 = tmp0 + c4 * (uint64_t)5U;
-  uint64_t f12 = tmp1;
-  uint64_t f22 = tmp2;
-  uint64_t f32 = tmp3;
-  uint64_t f42 = tmp4;
-  uint64_t mh = (uint64_t)0x3ffffffU;
-  uint64_t ml = (uint64_t)0x3fffffbU;
-  uint64_t mask = FStar_UInt64_eq_mask(f42, mh);
-  uint64_t mask1 = mask & FStar_UInt64_eq_mask(f32, mh);
-  uint64_t mask2 = mask1 & FStar_UInt64_eq_mask(f22, mh);
-  uint64_t mask3 = mask2 & FStar_UInt64_eq_mask(f12, mh);
-  uint64_t mask4 = mask3 & ~~FStar_UInt64_gte_mask(f02, ml);
-  uint64_t ph = mask4 & mh;
-  uint64_t pl = mask4 & ml;
-  uint64_t o0 = f02 - pl;
-  uint64_t o1 = f12 - ph;
-  uint64_t o2 = f22 - ph;
-  uint64_t o3 = f32 - ph;
-  uint64_t o4 = f42 - ph;
-  uint64_t f011 = o0;
-  uint64_t f111 = o1;
-  uint64_t f211 = o2;
-  uint64_t f311 = o3;
-  uint64_t f411 = o4;
-  acc[0U] = f011;
-  acc[1U] = f111;
-  acc[2U] = f211;
-  acc[3U] = f311;
-  acc[4U] = f411;
-  uint64_t f00 = acc[0U];
-  uint64_t f1 = acc[1U];
-  uint64_t f2 = acc[2U];
-  uint64_t f3 = acc[3U];
-  uint64_t f4 = acc[4U];
-  uint64_t f01 = f00;
-  uint64_t f112 = f1;
-  uint64_t f212 = f2;
-  uint64_t f312 = f3;
-  uint64_t f41 = f4;
-  uint64_t lo = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U;
-  uint64_t hi = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U;
-  uint64_t f10 = lo;
-  uint64_t f11 = hi;
-  uint64_t u0 = load64_le(ks);
-  uint64_t lo0 = u0;
-  uint64_t u = load64_le(ks + (uint32_t)8U);
-  uint64_t hi0 = u;
-  uint64_t f20 = lo0;
-  uint64_t f21 = hi0;
-  uint64_t r0 = f10 + f20;
-  uint64_t r1 = f11 + f21;
-  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U;
-  uint64_t r11 = r1 + c;
-  uint64_t f30 = r0;
-  uint64_t f31 = r11;
-  store64_le(tag, f30);
-  store64_le(tag + (uint32_t)8U, f31);
-}
-
-void Hacl_Poly1305_32_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key)
-{
-  uint64_t ctx[25U] = { 0U };
-  Hacl_Poly1305_32_poly1305_init(ctx, key);
-  Hacl_Poly1305_32_poly1305_update(ctx, len, text);
-  Hacl_Poly1305_32_poly1305_finish(tag, key, ctx);
-}
-
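The one-shot Poly1305 code deleted above keeps 128-bit quantities in five 26-bit limbs (5 x 26 = 130 bits, enough headroom for arithmetic modulo 2^130 - 5). The unpacking it repeats inline at every block can be read as this stand-alone helper, written here only to summarize the deleted bit-slicing; lo and hi are the two little-endian 64-bit halves of a 16-byte block, as in the deleted load64_le calls:

    static void to_limbs26(uint64_t e[5U], uint64_t lo, uint64_t hi)
    {
      e[0U] = lo & 0x3ffffffULL;                        /* bits   0..25  */
      e[1U] = (lo >> 26U) & 0x3ffffffULL;               /* bits  26..51  */
      e[2U] = (lo >> 52U) | ((hi & 0x3fffULL) << 12U);  /* bits  52..77  */
      e[3U] = (hi >> 14U) & 0x3ffffffULL;               /* bits  78..103 */
      e[4U] = hi >> 40U;                                /* bits 104..127 */
    }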
diff --git a/src/Hacl_RSAPSS.c b/src/Hacl_RSAPSS.c
index ceb9a6f0..71e141d0 100644
--- a/src/Hacl_RSAPSS.c
+++ b/src/Hacl_RSAPSS.c
@@ -35,51 +35,51 @@ static inline uint32_t hash_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)20U;
+        return 20U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     default:
       {
@@ -96,17 +96,17 @@ hash(Spec_Hash_Definitions_hash_alg a, uint8_t *mHash, uint32_t msgLen, uint8_t
   {
     case Spec_Hash_Definitions_SHA2_256:
       {
-        Hacl_Streaming_SHA2_hash_256(msg, msgLen, mHash);
+        Hacl_Hash_SHA2_hash_256(mHash, msg, msgLen);
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        Hacl_Streaming_SHA2_hash_384(msg, msgLen, mHash);
+        Hacl_Hash_SHA2_hash_384(mHash, msg, msgLen);
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        Hacl_Streaming_SHA2_hash_512(msg, msgLen, mHash);
+        Hacl_Hash_SHA2_hash_512(mHash, msg, msgLen);
         break;
       }
     default:
@@ -126,48 +126,48 @@ mgf_hash(
   uint8_t *res
 )
 {
-  KRML_CHECK_SIZE(sizeof (uint8_t), len + (uint32_t)4U);
-  uint8_t mgfseed_counter[len + (uint32_t)4U];
-  memset(mgfseed_counter, 0U, (len + (uint32_t)4U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), len + 4U);
+  uint8_t mgfseed_counter[len + 4U];
+  memset(mgfseed_counter, 0U, (len + 4U) * sizeof (uint8_t));
   memcpy(mgfseed_counter, mgfseed, len * sizeof (uint8_t));
   uint32_t hLen = hash_len(a);
-  uint32_t n = (maskLen - (uint32_t)1U) / hLen + (uint32_t)1U;
+  uint32_t n = (maskLen - 1U) / hLen + 1U;
   uint32_t accLen = n * hLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), accLen);
   uint8_t acc[accLen];
   memset(acc, 0U, accLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
     uint8_t *acc_i = acc + i * hLen;
     uint8_t *c = mgfseed_counter + len;
-    c[0U] = (uint8_t)(i >> (uint32_t)24U);
-    c[1U] = (uint8_t)(i >> (uint32_t)16U);
-    c[2U] = (uint8_t)(i >> (uint32_t)8U);
+    c[0U] = (uint8_t)(i >> 24U);
+    c[1U] = (uint8_t)(i >> 16U);
+    c[2U] = (uint8_t)(i >> 8U);
     c[3U] = (uint8_t)i;
-    hash(a, acc_i, len + (uint32_t)4U, mgfseed_counter);
+    hash(a, acc_i, len + 4U, mgfseed_counter);
   }
   memcpy(res, acc, maskLen * sizeof (uint8_t));
 }
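mgf_hash above is MGF1: it produces n = ceil(maskLen / hLen) blocks of hash(mgfseed || BE32(i)) and truncates their concatenation to maskLen bytes. The recurring (x - 1U) / d + 1U expression in this file is the usual ceiling division for x >= 1 that avoids the overflow risk of (x + d - 1U) / d; a worked instance for a SHA2-256 mask:

    uint32_t hLen = 32U;                      /* hash_len(Spec_Hash_Definitions_SHA2_256) */
    uint32_t maskLen = 100U;
    uint32_t n = (maskLen - 1U) / hLen + 1U;  /* 4 blocks: 128 bytes generated, 100 kept */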
 
 static inline uint64_t check_num_bits_u64(uint32_t bs, uint64_t *b)
 {
-  uint32_t bLen = (bs - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  if (bs == (uint32_t)64U * bLen)
+  uint32_t bLen = (bs - 1U) / 64U + 1U;
+  if (bs == 64U * bLen)
   {
-    return (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    return 0xFFFFFFFFFFFFFFFFULL;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
   uint64_t b2[bLen];
   memset(b2, 0U, bLen * sizeof (uint64_t));
-  uint32_t i0 = bs / (uint32_t)64U;
-  uint32_t j = bs % (uint32_t)64U;
-  b2[i0] = b2[i0] | (uint64_t)1U << j;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+  uint32_t i0 = bs / 64U;
+  uint32_t j = bs % 64U;
+  b2[i0] = b2[i0] | 1ULL << j;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < bLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t res = acc;
   return res;
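The loop just above (repeated in check_modulus_u64 and in rsapss_verify below) is a branch-free multi-limb "less than": limbs are scanned from least to most significant, the running verdict is kept while limbs compare equal and is overwritten by the less-than mask of any limb that differs, so the final mask reflects the most significant differing limb. Since (blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL) is just blt, the idiom condenses to the sketch below, assuming the FStar_UInt64_*_mask helpers return all-ones or all-zeros as their use here implies:

    /* all-ones if a < b as len-limb little-endian integers, all-zeros otherwise */
    static uint64_t bn_lt_mask(const uint64_t *a, const uint64_t *b, uint32_t len)
    {
      uint64_t acc = 0ULL;
      for (uint32_t i = 0U; i < len; i++)
      {
        uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]);
        uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]);
        acc = (beq & acc) | (~beq & blt);
      }
      return acc;
    }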
@@ -175,21 +175,21 @@ static inline uint64_t check_num_bits_u64(uint32_t bs, uint64_t *b)
 
 static inline uint64_t check_modulus_u64(uint32_t modBits, uint64_t *n)
 {
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint64_t bits0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bits0;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint64_t bits0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bits0;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t b2[nLen];
   memset(b2, 0U, nLen * sizeof (uint64_t));
-  uint32_t i0 = (modBits - (uint32_t)1U) / (uint32_t)64U;
-  uint32_t j = (modBits - (uint32_t)1U) % (uint32_t)64U;
-  b2[i0] = b2[i0] | (uint64_t)1U << j;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < nLen; i++)
+  uint32_t i0 = (modBits - 1U) / 64U;
+  uint32_t j = (modBits - 1U) % 64U;
+  b2[i0] = b2[i0] | 1ULL << j;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < nLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(b2[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(b2[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t res = acc;
   uint64_t m1 = res;
@@ -199,12 +199,12 @@ static inline uint64_t check_modulus_u64(uint32_t modBits, uint64_t *n)
 
 static inline uint64_t check_exponent_u64(uint32_t eBits, uint64_t *e)
 {
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), eLen);
   uint64_t bn_zero[eLen];
   memset(bn_zero, 0U, eLen * sizeof (uint64_t));
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < eLen; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < eLen; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(e[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -231,39 +231,39 @@ pss_encode(
   KRML_CHECK_SIZE(sizeof (uint8_t), hLen);
   uint8_t m1Hash[hLen];
   memset(m1Hash, 0U, hLen * sizeof (uint8_t));
-  uint32_t m1Len = (uint32_t)8U + hLen + saltLen;
+  uint32_t m1Len = 8U + hLen + saltLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), m1Len);
   uint8_t m1[m1Len];
   memset(m1, 0U, m1Len * sizeof (uint8_t));
-  hash(a, m1 + (uint32_t)8U, msgLen, msg);
-  memcpy(m1 + (uint32_t)8U + hLen, salt, saltLen * sizeof (uint8_t));
+  hash(a, m1 + 8U, msgLen, msg);
+  memcpy(m1 + 8U + hLen, salt, saltLen * sizeof (uint8_t));
   hash(a, m1Hash, m1Len, m1);
-  uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t dbLen = emLen - hLen - (uint32_t)1U;
+  uint32_t emLen = (emBits - 1U) / 8U + 1U;
+  uint32_t dbLen = emLen - hLen - 1U;
   KRML_CHECK_SIZE(sizeof (uint8_t), dbLen);
   uint8_t db[dbLen];
   memset(db, 0U, dbLen * sizeof (uint8_t));
-  uint32_t last_before_salt = dbLen - saltLen - (uint32_t)1U;
-  db[last_before_salt] = (uint8_t)1U;
-  memcpy(db + last_before_salt + (uint32_t)1U, salt, saltLen * sizeof (uint8_t));
+  uint32_t last_before_salt = dbLen - saltLen - 1U;
+  db[last_before_salt] = 1U;
+  memcpy(db + last_before_salt + 1U, salt, saltLen * sizeof (uint8_t));
   KRML_CHECK_SIZE(sizeof (uint8_t), dbLen);
   uint8_t dbMask[dbLen];
   memset(dbMask, 0U, dbLen * sizeof (uint8_t));
   mgf_hash(a, hLen, m1Hash, dbLen, dbMask);
-  for (uint32_t i = (uint32_t)0U; i < dbLen; i++)
+  for (uint32_t i = 0U; i < dbLen; i++)
   {
     uint8_t *os = db;
-    uint8_t x = db[i] ^ dbMask[i];
+    uint8_t x = (uint32_t)db[i] ^ (uint32_t)dbMask[i];
     os[i] = x;
   }
-  uint32_t msBits = emBits % (uint32_t)8U;
-  if (msBits > (uint32_t)0U)
+  uint32_t msBits = emBits % 8U;
+  if (msBits > 0U)
   {
-    db[0U] = db[0U] & (uint8_t)0xffU >> ((uint32_t)8U - msBits);
+    db[0U] = (uint32_t)db[0U] & 0xffU >> (8U - msBits);
   }
   memcpy(em, db, dbLen * sizeof (uint8_t));
   memcpy(em + dbLen, m1Hash, hLen * sizeof (uint8_t));
-  em[emLen - (uint32_t)1U] = (uint8_t)0xbcU;
+  em[emLen - 1U] = 0xbcU;
 }
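For orientation, the encoded message that pss_encode above writes into em (and that pss_verify below takes apart again) has the following layout, with emLen = (emBits - 1U) / 8U + 1U and dbLen = emLen - hLen - 1U; this is read off the code rather than restated from a specification:

    /*  em        =  maskedDB (dbLen bytes)  ||  m1Hash (hLen bytes)  ||  0xbc
     *  maskedDB  =  (0x00 .. 0x00 || 0x01 || salt)  XOR  mgf_hash(m1Hash, dbLen)
     *  m1Hash    =  hash(0x00 x 8  ||  hash(msg)  ||  salt)
     *  and the unused high bits of em[0] are cleared so em fits in emBits bits. */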
 
 static inline bool
@@ -276,105 +276,100 @@ pss_verify(
   uint8_t *em
 )
 {
-  uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t msBits = emBits % (uint32_t)8U;
+  uint32_t emLen = (emBits - 1U) / 8U + 1U;
+  uint32_t msBits = emBits % 8U;
   uint8_t em_0;
-  if (msBits > (uint32_t)0U)
+  if (msBits > 0U)
   {
-    em_0 = em[0U] & (uint8_t)0xffU << msBits;
+    em_0 = (uint32_t)em[0U] & 0xffU << msBits;
   }
   else
   {
-    em_0 = (uint8_t)0U;
+    em_0 = 0U;
   }
-  uint8_t em_last = em[emLen - (uint32_t)1U];
-  if (emLen < saltLen + hash_len(a) + (uint32_t)2U)
+  uint8_t em_last = em[emLen - 1U];
+  if (emLen < saltLen + hash_len(a) + 2U)
   {
     return false;
   }
-  if (!(em_last == (uint8_t)0xbcU && em_0 == (uint8_t)0U))
+  if (!(em_last == 0xbcU && em_0 == 0U))
   {
     return false;
   }
-  uint32_t emLen1 = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t emLen1 = (emBits - 1U) / 8U + 1U;
   uint32_t hLen = hash_len(a);
   KRML_CHECK_SIZE(sizeof (uint8_t), hLen);
   uint8_t m1Hash0[hLen];
   memset(m1Hash0, 0U, hLen * sizeof (uint8_t));
-  uint32_t dbLen = emLen1 - hLen - (uint32_t)1U;
+  uint32_t dbLen = emLen1 - hLen - 1U;
   uint8_t *maskedDB = em;
   uint8_t *m1Hash = em + dbLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), dbLen);
   uint8_t dbMask[dbLen];
   memset(dbMask, 0U, dbLen * sizeof (uint8_t));
   mgf_hash(a, hLen, m1Hash, dbLen, dbMask);
-  for (uint32_t i = (uint32_t)0U; i < dbLen; i++)
+  for (uint32_t i = 0U; i < dbLen; i++)
   {
     uint8_t *os = dbMask;
-    uint8_t x = dbMask[i] ^ maskedDB[i];
+    uint8_t x = (uint32_t)dbMask[i] ^ (uint32_t)maskedDB[i];
     os[i] = x;
   }
-  uint32_t msBits1 = emBits % (uint32_t)8U;
-  if (msBits1 > (uint32_t)0U)
+  uint32_t msBits1 = emBits % 8U;
+  if (msBits1 > 0U)
   {
-    dbMask[0U] = dbMask[0U] & (uint8_t)0xffU >> ((uint32_t)8U - msBits1);
+    dbMask[0U] = (uint32_t)dbMask[0U] & 0xffU >> (8U - msBits1);
   }
-  uint32_t padLen = emLen1 - saltLen - hLen - (uint32_t)1U;
+  uint32_t padLen = emLen1 - saltLen - hLen - 1U;
   KRML_CHECK_SIZE(sizeof (uint8_t), padLen);
   uint8_t pad2[padLen];
   memset(pad2, 0U, padLen * sizeof (uint8_t));
-  pad2[padLen - (uint32_t)1U] = (uint8_t)0x01U;
+  pad2[padLen - 1U] = 0x01U;
   uint8_t *pad = dbMask;
   uint8_t *salt = dbMask + padLen;
-  uint8_t res = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < padLen; i++)
+  uint8_t res = 255U;
+  for (uint32_t i = 0U; i < padLen; i++)
   {
     uint8_t uu____0 = FStar_UInt8_eq_mask(pad[i], pad2[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint8_t z = res;
-  if (!(z == (uint8_t)255U))
+  if (!(z == 255U))
   {
     return false;
   }
-  uint32_t m1Len = (uint32_t)8U + hLen + saltLen;
+  uint32_t m1Len = 8U + hLen + saltLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), m1Len);
   uint8_t m1[m1Len];
   memset(m1, 0U, m1Len * sizeof (uint8_t));
-  hash(a, m1 + (uint32_t)8U, msgLen, msg);
-  memcpy(m1 + (uint32_t)8U + hLen, salt, saltLen * sizeof (uint8_t));
+  hash(a, m1 + 8U, msgLen, msg);
+  memcpy(m1 + 8U + hLen, salt, saltLen * sizeof (uint8_t));
   hash(a, m1Hash0, m1Len, m1);
-  uint8_t res0 = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < hLen; i++)
+  uint8_t res0 = 255U;
+  for (uint32_t i = 0U; i < hLen; i++)
   {
     uint8_t uu____1 = FStar_UInt8_eq_mask(m1Hash0[i], m1Hash[i]);
-    res0 = uu____1 & res0;
+    res0 = (uint32_t)uu____1 & (uint32_t)res0;
   }
   uint8_t z0 = res0;
-  return z0 == (uint8_t)255U;
+  return z0 == 255U;
 }
 
 static inline bool
 load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb, uint64_t *pkey)
 {
-  uint32_t nbLen = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t ebLen = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nbLen = (modBits - 1U) / 8U + 1U;
+  uint32_t ebLen = (eBits - 1U) / 8U + 1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
   uint64_t *n = pkey;
   uint64_t *r2 = pkey + nLen;
   uint64_t *e = pkey + nLen + nLen;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen, nb, n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U)
-    / (uint32_t)64U
-    + (uint32_t)1U,
-    modBits - (uint32_t)1U,
-    n,
-    r2);
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen, eb, e);
   uint64_t m0 = check_modulus_u64(modBits, n);
   uint64_t m1 = check_exponent_u64(eBits, e);
   uint64_t m = m0 & m1;
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static inline bool
@@ -388,16 +383,16 @@ load_skey(
   uint64_t *skey
 )
 {
-  uint32_t dbLen = (dBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t dbLen = (dBits - 1U) / 8U + 1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
   uint32_t pkeyLen = nLen + nLen + eLen;
   uint64_t *pkey = skey;
   uint64_t *d = skey + pkeyLen;
   bool b = load_pkey(modBits, eBits, nb, eb, pkey);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(dbLen, db, d);
   uint64_t m1 = check_exponent_u64(dBits, d);
-  return b && m1 == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return b && m1 == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -435,45 +430,36 @@ Hacl_RSAPSS_rsapss_sign(
 {
   uint32_t hLen = hash_len(a);
   bool
-  b =
-    saltLen
-    <= (uint32_t)0xffffffffU - hLen - (uint32_t)8U
-    &&
-      saltLen
-      + hLen
-      + (uint32_t)2U
-      <= (modBits - (uint32_t)1U - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  b = saltLen <= 0xffffffffU - hLen - 8U && saltLen + hLen + 2U <= (modBits - 1U - 1U) / 8U + 1U;
   if (b)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
     uint64_t m[nLen];
     memset(m, 0U, nLen * sizeof (uint64_t));
-    uint32_t emBits = modBits - (uint32_t)1U;
-    uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+    uint32_t emBits = modBits - 1U;
+    uint32_t emLen = (emBits - 1U) / 8U + 1U;
     KRML_CHECK_SIZE(sizeof (uint8_t), emLen);
     uint8_t em[emLen];
     memset(em, 0U, emLen * sizeof (uint8_t));
     pss_encode(a, saltLen, salt, msgLen, msg, emBits, em);
     Hacl_Bignum_Convert_bn_from_bytes_be_uint64(emLen, em, m);
-    uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t k = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+    uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
+    uint32_t k = (modBits - 1U) / 8U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen1);
     uint64_t s[nLen1];
     memset(s, 0U, nLen1 * sizeof (uint64_t));
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen1);
     uint64_t m_[nLen1];
     memset(m_, 0U, nLen1 * sizeof (uint64_t));
-    uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen2 = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
     uint64_t *n = skey;
     uint64_t *r2 = skey + nLen2;
     uint64_t *e = skey + nLen2 + nLen2;
     uint64_t *d = skey + nLen2 + nLen2 + eLen;
     uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((modBits - (uint32_t)1U)
-      / (uint32_t)64U
-      + (uint32_t)1U,
+    Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((modBits - 1U) / 64U + 1U,
       n,
       mu,
       r2,
@@ -482,9 +468,7 @@ Hacl_RSAPSS_rsapss_sign(
       d,
       s);
     uint64_t mu0 = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - (uint32_t)1U)
-      / (uint32_t)64U
-      + (uint32_t)1U,
+    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - 1U) / 64U + 1U,
       n,
       mu0,
       r2,
@@ -492,22 +476,22 @@ Hacl_RSAPSS_rsapss_sign(
       eBits,
       e,
       m_);
-    uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-    for (uint32_t i = (uint32_t)0U; i < nLen2; i++)
+    uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+    for (uint32_t i = 0U; i < nLen2; i++)
     {
       uint64_t uu____0 = FStar_UInt64_eq_mask(m[i], m_[i]);
       mask = uu____0 & mask;
     }
     uint64_t mask1 = mask;
     uint64_t eq_m = mask1;
-    for (uint32_t i = (uint32_t)0U; i < nLen2; i++)
+    for (uint32_t i = 0U; i < nLen2; i++)
     {
       uint64_t *os = s;
       uint64_t x = s[i];
       uint64_t x0 = eq_m & x;
       os[i] = x0;
     }
-    bool eq_b = eq_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    bool eq_b = eq_m == 0xFFFFFFFFFFFFFFFFULL;
     Hacl_Bignum_Convert_bn_to_bytes_be_uint64(k, s, sgnt);
     bool eq_b0 = eq_b;
     return eq_b0;
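Note the self-check in the hunk above: after the constant-time private exponentiation, the candidate signature is re-exponentiated with the public exponent and the eq_m mask zeroes s unless the result round-trips to the encoded message, a check commonly deployed so that a faulty exponentiation cannot leak an exploitable signature. Schematically, as the two *_precomp_u64 calls and the masking loop implement it:

    /* consttime: s  = m^d mod n   (private exponent d)
       vartime:   m_ = s^e mod n   (public exponent e)
       eq_m is all-ones iff m_ == m; s is AND-masked with eq_m before
       bn_to_bytes_be, so a miscomputed s is serialized as zero and
       the function returns false. */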
@@ -547,42 +531,36 @@ Hacl_RSAPSS_rsapss_verify(
 )
 {
   uint32_t hLen = hash_len(a);
-  bool
-  b =
-    saltLen
-    <= (uint32_t)0xffffffffU - hLen - (uint32_t)8U
-    && sgntLen == (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  bool b = saltLen <= 0xffffffffU - hLen - 8U && sgntLen == (modBits - 1U) / 8U + 1U;
   if (b)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
     uint64_t m[nLen];
     memset(m, 0U, nLen * sizeof (uint64_t));
-    uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t k = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+    uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
+    uint32_t k = (modBits - 1U) / 8U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen1);
     uint64_t s[nLen1];
     memset(s, 0U, nLen1 * sizeof (uint64_t));
     Hacl_Bignum_Convert_bn_from_bytes_be_uint64(k, sgnt, s);
-    uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen2 = (modBits - 1U) / 64U + 1U;
     uint64_t *n = pkey;
     uint64_t *r2 = pkey + nLen2;
     uint64_t *e = pkey + nLen2 + nLen2;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < nLen2; i++)
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < nLen2; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(s[i], n[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(s[i], n[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t mask = acc;
     bool res;
-    if (mask == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+    if (mask == 0xFFFFFFFFFFFFFFFFULL)
     {
       uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-      Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - (uint32_t)1U)
-        / (uint32_t)64U
-        + (uint32_t)1U,
+      Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - 1U) / 64U + 1U,
         n,
         mu,
         r2,
@@ -591,17 +569,17 @@ Hacl_RSAPSS_rsapss_verify(
         e,
         m);
       bool ite;
-      if (!((modBits - (uint32_t)1U) % (uint32_t)8U == (uint32_t)0U))
+      if (!((modBits - 1U) % 8U == 0U))
       {
         ite = true;
       }
       else
       {
-        uint32_t i = (modBits - (uint32_t)1U) / (uint32_t)64U;
-        uint32_t j = (modBits - (uint32_t)1U) % (uint32_t)64U;
+        uint32_t i = (modBits - 1U) / 64U;
+        uint32_t j = (modBits - 1U) % 64U;
         uint64_t tmp = m[i];
-        uint64_t get_bit = tmp >> j & (uint64_t)1U;
-        ite = get_bit == (uint64_t)0U;
+        uint64_t get_bit = tmp >> j & 1ULL;
+        ite = get_bit == 0ULL;
       }
       if (ite)
       {
@@ -620,8 +598,8 @@ Hacl_RSAPSS_rsapss_verify(
     bool b10 = b1;
     if (b10)
     {
-      uint32_t emBits = modBits - (uint32_t)1U;
-      uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+      uint32_t emBits = modBits - 1U;
+      uint32_t emLen = (emBits - 1U) / 8U + 1U;
       KRML_CHECK_SIZE(sizeof (uint8_t), emLen);
       uint8_t em[emLen];
       memset(em, 0U, emLen * sizeof (uint8_t));
@@ -649,15 +627,11 @@ uint64_t
 *Hacl_RSAPSS_new_rsapss_load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb)
 {
   bool ite;
-  if ((uint32_t)1U < modBits && (uint32_t)0U < eBits)
+  if (1U < modBits && 0U < eBits)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    ite =
-      nLen
-      <= (uint32_t)33554431U
-      && eLen <= (uint32_t)67108863U
-      && nLen + nLen <= (uint32_t)0xffffffffU - eLen;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
+    ite = nLen <= 33554431U && eLen <= 67108863U && nLen + nLen <= 0xffffffffU - eLen;
   }
   else
   {
@@ -667,8 +641,8 @@ uint64_t
   {
     return NULL;
   }
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
   uint32_t pkeyLen = nLen + nLen + eLen;
   KRML_CHECK_SIZE(sizeof (uint64_t), pkeyLen);
   uint64_t *pkey = (uint64_t *)KRML_HOST_CALLOC(pkeyLen, sizeof (uint64_t));
@@ -678,24 +652,19 @@ uint64_t
   }
   uint64_t *pkey1 = pkey;
   uint64_t *pkey2 = pkey1;
-  uint32_t nbLen = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t ebLen = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nbLen = (modBits - 1U) / 8U + 1U;
+  uint32_t ebLen = (eBits - 1U) / 8U + 1U;
+  uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
   uint64_t *n = pkey2;
   uint64_t *r2 = pkey2 + nLen1;
   uint64_t *e = pkey2 + nLen1 + nLen1;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen, nb, n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U)
-    / (uint32_t)64U
-    + (uint32_t)1U,
-    modBits - (uint32_t)1U,
-    n,
-    r2);
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen, eb, e);
   uint64_t m0 = check_modulus_u64(modBits, n);
   uint64_t m1 = check_exponent_u64(eBits, e);
   uint64_t m = m0 & m1;
-  bool b = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool b = m == 0xFFFFFFFFFFFFFFFFULL;
   if (b)
   {
     return pkey2;
@@ -727,27 +696,23 @@ uint64_t
 )
 {
   bool ite0;
-  if ((uint32_t)1U < modBits && (uint32_t)0U < eBits)
+  if (1U < modBits && 0U < eBits)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    ite0 =
-      nLen
-      <= (uint32_t)33554431U
-      && eLen <= (uint32_t)67108863U
-      && nLen + nLen <= (uint32_t)0xffffffffU - eLen;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
+    ite0 = nLen <= 33554431U && eLen <= 67108863U && nLen + nLen <= 0xffffffffU - eLen;
   }
   else
   {
     ite0 = false;
   }
   bool ite;
-  if (ite0 && (uint32_t)0U < dBits)
+  if (ite0 && 0U < dBits)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t dLen = (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    ite = dLen <= (uint32_t)67108863U && (uint32_t)2U * nLen <= (uint32_t)0xffffffffU - eLen - dLen;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
+    uint32_t dLen = (dBits - 1U) / 64U + 1U;
+    ite = dLen <= 67108863U && 2U * nLen <= 0xffffffffU - eLen - dLen;
   }
   else
   {
@@ -757,9 +722,9 @@ uint64_t
   {
     return NULL;
   }
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t dLen = (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
+  uint32_t dLen = (dBits - 1U) / 64U + 1U;
   uint32_t skeyLen = nLen + nLen + eLen + dLen;
   KRML_CHECK_SIZE(sizeof (uint64_t), skeyLen);
   uint64_t *skey = (uint64_t *)KRML_HOST_CALLOC(skeyLen, sizeof (uint64_t));
@@ -769,33 +734,28 @@ uint64_t
   }
   uint64_t *skey1 = skey;
   uint64_t *skey2 = skey1;
-  uint32_t dbLen = (dBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen1 = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t dbLen = (dBits - 1U) / 8U + 1U;
+  uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen1 = (eBits - 1U) / 64U + 1U;
   uint32_t pkeyLen = nLen1 + nLen1 + eLen1;
   uint64_t *pkey = skey2;
   uint64_t *d = skey2 + pkeyLen;
-  uint32_t nbLen1 = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t ebLen1 = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nbLen1 = (modBits - 1U) / 8U + 1U;
+  uint32_t ebLen1 = (eBits - 1U) / 8U + 1U;
+  uint32_t nLen2 = (modBits - 1U) / 64U + 1U;
   uint64_t *n = pkey;
   uint64_t *r2 = pkey + nLen2;
   uint64_t *e = pkey + nLen2 + nLen2;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen1, nb, n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U)
-    / (uint32_t)64U
-    + (uint32_t)1U,
-    modBits - (uint32_t)1U,
-    n,
-    r2);
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen1, eb, e);
   uint64_t m0 = check_modulus_u64(modBits, n);
   uint64_t m10 = check_exponent_u64(eBits, e);
   uint64_t m = m0 & m10;
-  bool b = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool b = m == 0xFFFFFFFFFFFFFFFFULL;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(dbLen, db, d);
   uint64_t m1 = check_exponent_u64(dBits, d);
-  bool b0 = b && m1 == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool b0 = b && m1 == 0xFFFFFFFFFFFFFFFFULL;
   if (b0)
   {
     return skey2;
@@ -842,21 +802,12 @@ Hacl_RSAPSS_rsapss_skey_sign(
 )
 {
   KRML_CHECK_SIZE(sizeof (uint64_t),
-    (uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U
-    + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U);
+    2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U + (dBits - 1U) / 64U + 1U);
   uint64_t
-  skey[(uint32_t)2U
-  * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-  + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U
-  + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U];
+  skey[2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U + (dBits - 1U) / 64U + 1U];
   memset(skey,
     0U,
-    ((uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U
-    + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
+    (2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U + (dBits - 1U) / 64U + 1U)
     * sizeof (uint64_t));
   bool b = load_skey(modBits, eBits, dBits, nb, eb, db, skey);
   if (b)
@@ -909,20 +860,11 @@ Hacl_RSAPSS_rsapss_pkey_verify(
   uint8_t *msg
 )
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t),
-    (uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U);
-  uint64_t
-  pkey[(uint32_t)2U
-  * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-  + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U];
+  KRML_CHECK_SIZE(sizeof (uint64_t), 2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U);
+  uint64_t pkey[2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U];
   memset(pkey,
     0U,
-    ((uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    * sizeof (uint64_t));
+    (2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U) * sizeof (uint64_t));
   bool b = load_pkey(modBits, eBits, nb, eb, pkey);
   if (b)
   {
diff --git a/src/Hacl_SHA2_Vec128.c b/src/Hacl_SHA2_Vec128.c
index e1b6e304..02af75b1 100644
--- a/src/Hacl_SHA2_Vec128.c
+++ b/src/Hacl_SHA2_Vec128.c
@@ -32,21 +32,21 @@
 static inline void sha224_init4(Lib_IntVector_Intrinsics_vec128 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
-    uint32_t hi = Hacl_Impl_SHA2_Generic_h224[i];
+    uint32_t hi = Hacl_Hash_SHA2_h224[i];
     Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_load32(hi);
     os[i] = x;);
 }
 
 static inline void
-sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash)
+sha224_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash)
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec128));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -55,18 +55,18 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[1U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)16U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)16U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)16U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)16U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)48U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)48U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)48U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)48U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 16U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 16U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 16U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 16U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 48U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 48U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 48U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 48U);
   Lib_IntVector_Intrinsics_vec128 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec128 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec128 v20 = ws[2U];
@@ -196,14 +196,14 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Hash_SHA2_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec128 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec128 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec128 b0 = hash[1U];
@@ -218,10 +218,10 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       t1 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(h02,
                 Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_lognot(e0), g0))),
             k_e_t),
@@ -229,10 +229,10 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       Lib_IntVector_Intrinsics_vec128
       t2 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, b0),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, c0),
               Lib_IntVector_Intrinsics_vec128_and(b0, c0))));
@@ -252,30 +252,30 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec128 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec128
         s1 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec128
         s0 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(s1,
                 t7),
@@ -283,9 +283,9 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
     Lib_IntVector_Intrinsics_vec128
     x = Lib_IntVector_Intrinsics_vec128_add32(hash[i], hash_old[i]);
@@ -295,22 +295,22 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
 static inline void
 sha224_update_nblocks4(
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec128 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
-    Hacl_Impl_SHA2_Types_uint8_4p
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
+    Hacl_Hash_SHA2_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha224_update4(mb, st);
   }
@@ -320,69 +320,69 @@ static inline void
 sha224_update_last4(
   uint64_t totlen,
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec128 *hash
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[512U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)64U;
+  uint8_t *last11 = last3 + 64U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } };
-  Hacl_Impl_SHA2_Types_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
-  Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
-  Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
+  Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
+  Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst;
+  Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd;
   sha224_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha224_update4(last1, hash);
     return;
@@ -390,7 +390,7 @@ sha224_update_last4(
 }
 
 static inline void
-sha224_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Impl_SHA2_Types_uint8_4p h)
+sha224_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Hash_SHA2_uint8_4p h)
 {
   uint8_t hbuf[128U] = { 0U };
   Lib_IntVector_Intrinsics_vec128 v00 = st[0U];
@@ -458,18 +458,18 @@ sha224_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * (uint32_t)16U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * 16U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)28U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 28U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 28U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 28U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 28U * sizeof (uint8_t));
 }
 
 void
@@ -485,16 +485,16 @@ Hacl_SHA2_Vec128_sha224_4(
   uint8_t *input3
 )
 {
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U };
   sha224_init4(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha224_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
@@ -503,7 +503,7 @@ Hacl_SHA2_Vec128_sha224_4(
   uint8_t *bl1 = b1 + input_len - rem1;
   uint8_t *bl2 = b2 + input_len - rem1;
   uint8_t *bl3 = b3 + input_len - rem1;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
   sha224_update_last4(len_, rem, lb, st);
   sha224_finish4(st, rb);
@@ -512,21 +512,21 @@ Hacl_SHA2_Vec128_sha224_4(
 static inline void sha256_init4(Lib_IntVector_Intrinsics_vec128 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
-    uint32_t hi = Hacl_Impl_SHA2_Generic_h256[i];
+    uint32_t hi = Hacl_Hash_SHA2_h256[i];
     Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_load32(hi);
     os[i] = x;);
 }
 
 static inline void
-sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash)
+sha256_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash)
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec128));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -535,18 +535,18 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[1U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)16U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)16U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)16U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)16U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)48U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)48U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)48U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)48U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 16U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 16U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 16U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 16U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 48U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 48U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 48U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 48U);
   Lib_IntVector_Intrinsics_vec128 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec128 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec128 v20 = ws[2U];
@@ -676,14 +676,14 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Hash_SHA2_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec128 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec128 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec128 b0 = hash[1U];
@@ -698,10 +698,10 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       t1 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(h02,
                 Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_lognot(e0), g0))),
             k_e_t),
@@ -709,10 +709,10 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       Lib_IntVector_Intrinsics_vec128
       t2 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, b0),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, c0),
               Lib_IntVector_Intrinsics_vec128_and(b0, c0))));
@@ -732,30 +732,30 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec128 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec128
         s1 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec128
         s0 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(s1,
                 t7),
@@ -763,9 +763,9 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
     Lib_IntVector_Intrinsics_vec128
     x = Lib_IntVector_Intrinsics_vec128_add32(hash[i], hash_old[i]);
@@ -775,22 +775,22 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
 static inline void
 sha256_update_nblocks4(
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec128 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
-    Hacl_Impl_SHA2_Types_uint8_4p
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
+    Hacl_Hash_SHA2_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha256_update4(mb, st);
   }
@@ -800,69 +800,69 @@ static inline void
 sha256_update_last4(
   uint64_t totlen,
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec128 *hash
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[512U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)64U;
+  uint8_t *last11 = last3 + 64U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } };
-  Hacl_Impl_SHA2_Types_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
-  Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
-  Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
+  Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
+  Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst;
+  Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd;
   sha256_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha256_update4(last1, hash);
     return;
@@ -870,7 +870,7 @@ sha256_update_last4(
 }
 
 static inline void
-sha256_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Impl_SHA2_Types_uint8_4p h)
+sha256_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Hash_SHA2_uint8_4p h)
 {
   uint8_t hbuf[128U] = { 0U };
   Lib_IntVector_Intrinsics_vec128 v00 = st[0U];
@@ -938,18 +938,18 @@ sha256_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * (uint32_t)16U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * 16U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 32U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 32U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 32U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 32U * sizeof (uint8_t));
 }
 
 void
@@ -965,16 +965,16 @@ Hacl_SHA2_Vec128_sha256_4(
   uint8_t *input3
 )
 {
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U };
   sha256_init4(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha256_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
@@ -983,7 +983,7 @@ Hacl_SHA2_Vec128_sha256_4(
   uint8_t *bl1 = b1 + input_len - rem1;
   uint8_t *bl2 = b2 + input_len - rem1;
   uint8_t *bl3 = b3 + input_len - rem1;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
   sha256_update_last4(len_, rem, lb, st);
   sha256_finish4(st, rb);
diff --git a/src/Hacl_SHA2_Vec256.c b/src/Hacl_SHA2_Vec256.c
index b74ce621..c34767f5 100644
--- a/src/Hacl_SHA2_Vec256.c
+++ b/src/Hacl_SHA2_Vec256.c
@@ -33,21 +33,21 @@
 static inline void sha224_init8(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
-    uint32_t hi = Hacl_Impl_SHA2_Generic_h224[i];
+    uint32_t hi = Hacl_Hash_SHA2_h224[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load32(hi);
     os[i] = x;);
 }
 
 static inline void
-sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256 *hash)
+sha224_update8(Hacl_Hash_SHA2_uint8_8p b, Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = b.snd.snd.snd.snd.snd.fst;
@@ -64,14 +64,14 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[5U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5);
   ws[6U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6);
   ws[7U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + (uint32_t)32U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + (uint32_t)32U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + (uint32_t)32U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + (uint32_t)32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + 32U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + 32U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + 32U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + 32U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -281,14 +281,14 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Hash_SHA2_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -303,10 +303,10 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -314,10 +314,10 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -337,30 +337,30 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(s1,
                 t7),
@@ -368,9 +368,9 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add32(hash[i], hash_old[i]);
@@ -380,12 +380,12 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
 static inline void
 sha224_update_nblocks8(
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_8p b,
+  Hacl_Hash_SHA2_uint8_8p b,
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
     uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -395,15 +395,15 @@ sha224_update_nblocks8(
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
-    uint8_t *bl4 = b4 + i * (uint32_t)64U;
-    uint8_t *bl5 = b5 + i * (uint32_t)64U;
-    uint8_t *bl6 = b6 + i * (uint32_t)64U;
-    uint8_t *bl7 = b7 + i * (uint32_t)64U;
-    Hacl_Impl_SHA2_Types_uint8_8p
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
+    uint8_t *bl4 = b4 + i * 64U;
+    uint8_t *bl5 = b5 + i * 64U;
+    uint8_t *bl6 = b6 + i * 64U;
+    uint8_t *bl7 = b7 + i * 64U;
+    Hacl_Hash_SHA2_uint8_8p
     mb =
       {
         .fst = bl0,
@@ -426,23 +426,23 @@ static inline void
 sha224_update_last8(
   uint64_t totlen,
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_8p b,
+  Hacl_Hash_SHA2_uint8_8p b,
   Lib_IntVector_Intrinsics_vec256 *hash
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -453,70 +453,70 @@ sha224_update_last8(
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
-  uint8_t *last4 = last + (uint32_t)512U;
-  uint8_t *last5 = last + (uint32_t)640U;
-  uint8_t *last6 = last + (uint32_t)768U;
-  uint8_t *last7 = last + (uint32_t)896U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
+  uint8_t *last4 = last + 512U;
+  uint8_t *last5 = last + 640U;
+  uint8_t *last6 = last + 768U;
+  uint8_t *last7 = last + 896U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last013 = last3;
-  uint8_t *last113 = last3 + (uint32_t)64U;
+  uint8_t *last113 = last3 + 64U;
   uint8_t *l30 = last013;
   uint8_t *l31 = last113;
   memcpy(last4, b4, len * sizeof (uint8_t));
-  last4[len] = (uint8_t)0x80U;
-  memcpy(last4 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last4[len] = 0x80U;
+  memcpy(last4 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last014 = last4;
-  uint8_t *last114 = last4 + (uint32_t)64U;
+  uint8_t *last114 = last4 + 64U;
   uint8_t *l40 = last014;
   uint8_t *l41 = last114;
   memcpy(last5, b5, len * sizeof (uint8_t));
-  last5[len] = (uint8_t)0x80U;
-  memcpy(last5 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last5[len] = 0x80U;
+  memcpy(last5 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last015 = last5;
-  uint8_t *last115 = last5 + (uint32_t)64U;
+  uint8_t *last115 = last5 + 64U;
   uint8_t *l50 = last015;
   uint8_t *l51 = last115;
   memcpy(last6, b6, len * sizeof (uint8_t));
-  last6[len] = (uint8_t)0x80U;
-  memcpy(last6 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last6[len] = 0x80U;
+  memcpy(last6 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last016 = last6;
-  uint8_t *last116 = last6 + (uint32_t)64U;
+  uint8_t *last116 = last6 + 64U;
   uint8_t *l60 = last016;
   uint8_t *l61 = last116;
   memcpy(last7, b7, len * sizeof (uint8_t));
-  last7[len] = (uint8_t)0x80U;
-  memcpy(last7 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last7[len] = 0x80U;
+  memcpy(last7 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last7;
-  uint8_t *last11 = last7 + (uint32_t)64U;
+  uint8_t *last11 = last7 + 64U;
   uint8_t *l70 = last01;
   uint8_t *l71 = last11;
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   mb0 =
     {
       .fst = l00,
@@ -531,7 +531,7 @@ sha224_update_last8(
         }
       }
     };
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   mb1 =
     {
       .fst = l01,
@@ -546,11 +546,11 @@ sha224_update_last8(
         }
       }
     };
-  Hacl_Impl_SHA2_Types_uint8_2x8p scrut = { .fst = mb0, .snd = mb1 };
-  Hacl_Impl_SHA2_Types_uint8_8p last0 = scrut.fst;
-  Hacl_Impl_SHA2_Types_uint8_8p last1 = scrut.snd;
+  Hacl_Hash_SHA2_uint8_2x8p scrut = { .fst = mb0, .snd = mb1 };
+  Hacl_Hash_SHA2_uint8_8p last0 = scrut.fst;
+  Hacl_Hash_SHA2_uint8_8p last1 = scrut.snd;
   sha224_update8(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha224_update8(last1, hash);
     return;
@@ -558,7 +558,7 @@ sha224_update_last8(
 }
 
 static inline void
-sha224_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8p h)
+sha224_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_8p h)
 {
   uint8_t hbuf[256U] = { 0U };
   Lib_IntVector_Intrinsics_vec256 v0 = st[0U];
@@ -662,10 +662,10 @@ sha224_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   st[6U] = st6_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * 32U, st[i]););
   uint8_t *b7 = h.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = h.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = h.snd.snd.snd.snd.snd.fst;
@@ -674,14 +674,14 @@ sha224_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b4, hbuf + (uint32_t)128U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b5, hbuf + (uint32_t)160U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b6, hbuf + (uint32_t)192U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b7, hbuf + (uint32_t)224U, (uint32_t)28U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 28U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 28U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 28U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 28U * sizeof (uint8_t));
+  memcpy(b4, hbuf + 128U, 28U * sizeof (uint8_t));
+  memcpy(b5, hbuf + 160U, 28U * sizeof (uint8_t));
+  memcpy(b6, hbuf + 192U, 28U * sizeof (uint8_t));
+  memcpy(b7, hbuf + 224U, 28U * sizeof (uint8_t));
 }
 
 void
@@ -705,7 +705,7 @@ Hacl_SHA2_Vec256_sha224_8(
   uint8_t *input7
 )
 {
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   ib =
     {
       .fst = input0,
@@ -723,7 +723,7 @@ Hacl_SHA2_Vec256_sha224_8(
         }
       }
     };
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   rb =
     {
       .fst = dst0,
@@ -740,10 +740,10 @@ Hacl_SHA2_Vec256_sha224_8(
     };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha224_init8(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha224_update_nblocks8(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b7 = ib.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = ib.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = ib.snd.snd.snd.snd.snd.fst;
@@ -760,7 +760,7 @@ Hacl_SHA2_Vec256_sha224_8(
   uint8_t *bl5 = b5 + input_len - rem1;
   uint8_t *bl6 = b6 + input_len - rem1;
   uint8_t *bl7 = b7 + input_len - rem1;
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   lb =
     {
       .fst = bl0,
@@ -782,21 +782,21 @@ Hacl_SHA2_Vec256_sha224_8(
 static inline void sha256_init8(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
-    uint32_t hi = Hacl_Impl_SHA2_Generic_h256[i];
+    uint32_t hi = Hacl_Hash_SHA2_h256[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load32(hi);
     os[i] = x;);
 }
 
 static inline void
-sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256 *hash)
+sha256_update8(Hacl_Hash_SHA2_uint8_8p b, Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = b.snd.snd.snd.snd.snd.fst;
@@ -813,14 +813,14 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[5U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5);
   ws[6U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6);
   ws[7U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + (uint32_t)32U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + (uint32_t)32U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + (uint32_t)32U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + (uint32_t)32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + 32U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + 32U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + 32U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + 32U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -1030,14 +1030,14 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Hash_SHA2_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -1052,10 +1052,10 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -1063,10 +1063,10 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -1086,30 +1086,30 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(s1,
                 t7),
@@ -1117,9 +1117,9 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add32(hash[i], hash_old[i]);
@@ -1129,12 +1129,12 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
 static inline void
 sha256_update_nblocks8(
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_8p b,
+  Hacl_Hash_SHA2_uint8_8p b,
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
     uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -1144,15 +1144,15 @@ sha256_update_nblocks8(
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
-    uint8_t *bl4 = b4 + i * (uint32_t)64U;
-    uint8_t *bl5 = b5 + i * (uint32_t)64U;
-    uint8_t *bl6 = b6 + i * (uint32_t)64U;
-    uint8_t *bl7 = b7 + i * (uint32_t)64U;
-    Hacl_Impl_SHA2_Types_uint8_8p
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
+    uint8_t *bl4 = b4 + i * 64U;
+    uint8_t *bl5 = b5 + i * 64U;
+    uint8_t *bl6 = b6 + i * 64U;
+    uint8_t *bl7 = b7 + i * 64U;
+    Hacl_Hash_SHA2_uint8_8p
     mb =
       {
         .fst = bl0,
@@ -1175,23 +1175,23 @@ static inline void
 sha256_update_last8(
   uint64_t totlen,
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_8p b,
+  Hacl_Hash_SHA2_uint8_8p b,
   Lib_IntVector_Intrinsics_vec256 *hash
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -1202,70 +1202,70 @@ sha256_update_last8(
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
-  uint8_t *last4 = last + (uint32_t)512U;
-  uint8_t *last5 = last + (uint32_t)640U;
-  uint8_t *last6 = last + (uint32_t)768U;
-  uint8_t *last7 = last + (uint32_t)896U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
+  uint8_t *last4 = last + 512U;
+  uint8_t *last5 = last + 640U;
+  uint8_t *last6 = last + 768U;
+  uint8_t *last7 = last + 896U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last013 = last3;
-  uint8_t *last113 = last3 + (uint32_t)64U;
+  uint8_t *last113 = last3 + 64U;
   uint8_t *l30 = last013;
   uint8_t *l31 = last113;
   memcpy(last4, b4, len * sizeof (uint8_t));
-  last4[len] = (uint8_t)0x80U;
-  memcpy(last4 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last4[len] = 0x80U;
+  memcpy(last4 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last014 = last4;
-  uint8_t *last114 = last4 + (uint32_t)64U;
+  uint8_t *last114 = last4 + 64U;
   uint8_t *l40 = last014;
   uint8_t *l41 = last114;
   memcpy(last5, b5, len * sizeof (uint8_t));
-  last5[len] = (uint8_t)0x80U;
-  memcpy(last5 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last5[len] = 0x80U;
+  memcpy(last5 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last015 = last5;
-  uint8_t *last115 = last5 + (uint32_t)64U;
+  uint8_t *last115 = last5 + 64U;
   uint8_t *l50 = last015;
   uint8_t *l51 = last115;
   memcpy(last6, b6, len * sizeof (uint8_t));
-  last6[len] = (uint8_t)0x80U;
-  memcpy(last6 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last6[len] = 0x80U;
+  memcpy(last6 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last016 = last6;
-  uint8_t *last116 = last6 + (uint32_t)64U;
+  uint8_t *last116 = last6 + 64U;
   uint8_t *l60 = last016;
   uint8_t *l61 = last116;
   memcpy(last7, b7, len * sizeof (uint8_t));
-  last7[len] = (uint8_t)0x80U;
-  memcpy(last7 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last7[len] = 0x80U;
+  memcpy(last7 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last7;
-  uint8_t *last11 = last7 + (uint32_t)64U;
+  uint8_t *last11 = last7 + 64U;
   uint8_t *l70 = last01;
   uint8_t *l71 = last11;
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   mb0 =
     {
       .fst = l00,
@@ -1280,7 +1280,7 @@ sha256_update_last8(
         }
       }
     };
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   mb1 =
     {
       .fst = l01,
@@ -1295,11 +1295,11 @@ sha256_update_last8(
         }
       }
     };
-  Hacl_Impl_SHA2_Types_uint8_2x8p scrut = { .fst = mb0, .snd = mb1 };
-  Hacl_Impl_SHA2_Types_uint8_8p last0 = scrut.fst;
-  Hacl_Impl_SHA2_Types_uint8_8p last1 = scrut.snd;
+  Hacl_Hash_SHA2_uint8_2x8p scrut = { .fst = mb0, .snd = mb1 };
+  Hacl_Hash_SHA2_uint8_8p last0 = scrut.fst;
+  Hacl_Hash_SHA2_uint8_8p last1 = scrut.snd;
   sha256_update8(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha256_update8(last1, hash);
     return;
@@ -1307,7 +1307,7 @@ sha256_update_last8(
 }
 
 static inline void
-sha256_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8p h)
+sha256_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_8p h)
 {
   uint8_t hbuf[256U] = { 0U };
   Lib_IntVector_Intrinsics_vec256 v0 = st[0U];
@@ -1411,10 +1411,10 @@ sha256_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   st[6U] = st6_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * 32U, st[i]););
   uint8_t *b7 = h.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = h.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = h.snd.snd.snd.snd.snd.fst;
@@ -1423,14 +1423,14 @@ sha256_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b4, hbuf + (uint32_t)128U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b5, hbuf + (uint32_t)160U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b6, hbuf + (uint32_t)192U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b7, hbuf + (uint32_t)224U, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 32U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 32U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 32U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 32U * sizeof (uint8_t));
+  memcpy(b4, hbuf + 128U, 32U * sizeof (uint8_t));
+  memcpy(b5, hbuf + 160U, 32U * sizeof (uint8_t));
+  memcpy(b6, hbuf + 192U, 32U * sizeof (uint8_t));
+  memcpy(b7, hbuf + 224U, 32U * sizeof (uint8_t));
 }
 
 void
@@ -1454,7 +1454,7 @@ Hacl_SHA2_Vec256_sha256_8(
   uint8_t *input7
 )
 {
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   ib =
     {
       .fst = input0,
@@ -1472,7 +1472,7 @@ Hacl_SHA2_Vec256_sha256_8(
         }
       }
     };
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   rb =
     {
       .fst = dst0,
@@ -1489,10 +1489,10 @@ Hacl_SHA2_Vec256_sha256_8(
     };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha256_init8(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha256_update_nblocks8(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b7 = ib.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = ib.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = ib.snd.snd.snd.snd.snd.fst;
@@ -1509,7 +1509,7 @@ Hacl_SHA2_Vec256_sha256_8(
   uint8_t *bl5 = b5 + input_len - rem1;
   uint8_t *bl6 = b6 + input_len - rem1;
   uint8_t *bl7 = b7 + input_len - rem1;
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   lb =
     {
       .fst = bl0,
@@ -1531,21 +1531,21 @@ Hacl_SHA2_Vec256_sha256_8(
 static inline void sha384_init4(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
-    uint64_t hi = Hacl_Impl_SHA2_Generic_h384[i];
+    uint64_t hi = Hacl_Hash_SHA2_h384[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load64(hi);
     os[i] = x;);
 }
 
 static inline void
-sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash)
+sha384_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -1554,18 +1554,18 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)32U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)32U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)32U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)32U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)64U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)64U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)64U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)64U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)96U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)96U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)96U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)96U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 32U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 32U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 32U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 64U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 64U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 64U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 64U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 96U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 96U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 96U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 96U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -1679,14 +1679,14 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR5(i0,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
+    0U,
+    5U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint64_t k_t = Hacl_Hash_SHA2_k384_512[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -1701,10 +1701,10 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                    (uint32_t)14U),
+                    14U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                      (uint32_t)18U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, (uint32_t)41U)))),
+                      18U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, 41U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -1712,10 +1712,10 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-              (uint32_t)28U),
+              28U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-                (uint32_t)34U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, (uint32_t)39U))),
+                34U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, 39U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -1735,30 +1735,30 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)4U)
+    if (i0 < 4U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-              (uint32_t)19U),
+              19U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-                (uint32_t)61U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, (uint32_t)6U)));
+                61U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, 6U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-              (uint32_t)1U),
+              1U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-                (uint32_t)8U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, (uint32_t)7U)));
+                8U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, 7U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(s1,
                 t7),
@@ -1766,9 +1766,9 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add64(hash[i], hash_old[i]);
@@ -1778,22 +1778,22 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
 static inline void
 sha384_update_nblocks4(
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)128U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 128U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)128U;
-    uint8_t *bl1 = b1 + i * (uint32_t)128U;
-    uint8_t *bl2 = b2 + i * (uint32_t)128U;
-    uint8_t *bl3 = b3 + i * (uint32_t)128U;
-    Hacl_Impl_SHA2_Types_uint8_4p
+    uint8_t *bl0 = b0 + i * 128U;
+    uint8_t *bl1 = b1 + i * 128U;
+    uint8_t *bl2 = b2 + i * 128U;
+    uint8_t *bl3 = b3 + i * 128U;
+    Hacl_Hash_SHA2_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha384_update4(mb, st);
   }
@@ -1803,69 +1803,69 @@ static inline void
 sha384_update_last4(
   FStar_UInt128_uint128 totlen,
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec256 *hash
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+  if (len + 16U + 1U <= 128U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)128U;
+  uint32_t fin = blocks * 128U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[16U] = { 0U };
-  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, 3U);
   store128_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)256U;
-  uint8_t *last2 = last + (uint32_t)512U;
-  uint8_t *last3 = last + (uint32_t)768U;
+  uint8_t *last10 = last + 256U;
+  uint8_t *last2 = last + 512U;
+  uint8_t *last3 = last + 768U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)128U;
+  uint8_t *last110 = last00 + 128U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)128U;
+  uint8_t *last111 = last10 + 128U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)128U;
+  uint8_t *last112 = last2 + 128U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)128U;
+  uint8_t *last11 = last3 + 128U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } };
-  Hacl_Impl_SHA2_Types_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
-  Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
-  Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
+  Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
+  Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst;
+  Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd;
   sha384_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha384_update4(last1, hash);
     return;
@@ -1873,7 +1873,7 @@ sha384_update_last4(
 }
 
 static inline void
-sha384_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_4p h)
+sha384_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_4p h)
 {
   uint8_t hbuf[256U] = { 0U };
   Lib_IntVector_Intrinsics_vec256 v00 = st[0U];
@@ -1933,18 +1933,18 @@ sha384_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * 32U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)48U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)64U, (uint32_t)48U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)128U, (uint32_t)48U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)192U, (uint32_t)48U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 48U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 64U, 48U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 128U, 48U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 192U, 48U * sizeof (uint8_t));
 }
 
 void
@@ -1960,16 +1960,16 @@ Hacl_SHA2_Vec256_sha384_4(
   uint8_t *input3
 )
 {
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha384_init4(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
   sha384_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
@@ -1978,7 +1978,7 @@ Hacl_SHA2_Vec256_sha384_4(
   uint8_t *bl1 = b1 + input_len - rem1;
   uint8_t *bl2 = b2 + input_len - rem1;
   uint8_t *bl3 = b3 + input_len - rem1;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
   sha384_update_last4(len_, rem, lb, st);
   sha384_finish4(st, rb);
@@ -1987,21 +1987,21 @@ Hacl_SHA2_Vec256_sha384_4(
 static inline void sha512_init4(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
-    uint64_t hi = Hacl_Impl_SHA2_Generic_h512[i];
+    uint64_t hi = Hacl_Hash_SHA2_h512[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load64(hi);
     os[i] = x;);
 }
 
 static inline void
-sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash)
+sha512_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -2010,18 +2010,18 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)32U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)32U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)32U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)32U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)64U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)64U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)64U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)64U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)96U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)96U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)96U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)96U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 32U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 32U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 32U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 64U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 64U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 64U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 64U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 96U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 96U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 96U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 96U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -2135,14 +2135,14 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR5(i0,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
+    0U,
+    5U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint64_t k_t = Hacl_Hash_SHA2_k384_512[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -2157,10 +2157,10 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                    (uint32_t)14U),
+                    14U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                      (uint32_t)18U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, (uint32_t)41U)))),
+                      18U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, 41U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -2168,10 +2168,10 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-              (uint32_t)28U),
+              28U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-                (uint32_t)34U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, (uint32_t)39U))),
+                34U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, 39U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -2191,30 +2191,30 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)4U)
+    if (i0 < 4U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-              (uint32_t)19U),
+              19U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-                (uint32_t)61U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, (uint32_t)6U)));
+                61U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, 6U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-              (uint32_t)1U),
+              1U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-                (uint32_t)8U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, (uint32_t)7U)));
+                8U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, 7U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(s1,
                 t7),
@@ -2222,9 +2222,9 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add64(hash[i], hash_old[i]);
@@ -2234,22 +2234,22 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
 static inline void
 sha512_update_nblocks4(
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)128U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 128U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)128U;
-    uint8_t *bl1 = b1 + i * (uint32_t)128U;
-    uint8_t *bl2 = b2 + i * (uint32_t)128U;
-    uint8_t *bl3 = b3 + i * (uint32_t)128U;
-    Hacl_Impl_SHA2_Types_uint8_4p
+    uint8_t *bl0 = b0 + i * 128U;
+    uint8_t *bl1 = b1 + i * 128U;
+    uint8_t *bl2 = b2 + i * 128U;
+    uint8_t *bl3 = b3 + i * 128U;
+    Hacl_Hash_SHA2_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha512_update4(mb, st);
   }
@@ -2259,69 +2259,69 @@ static inline void
 sha512_update_last4(
   FStar_UInt128_uint128 totlen,
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec256 *hash
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+  if (len + 16U + 1U <= 128U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)128U;
+  uint32_t fin = blocks * 128U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[16U] = { 0U };
-  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, 3U);
   store128_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)256U;
-  uint8_t *last2 = last + (uint32_t)512U;
-  uint8_t *last3 = last + (uint32_t)768U;
+  uint8_t *last10 = last + 256U;
+  uint8_t *last2 = last + 512U;
+  uint8_t *last3 = last + 768U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)128U;
+  uint8_t *last110 = last00 + 128U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)128U;
+  uint8_t *last111 = last10 + 128U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)128U;
+  uint8_t *last112 = last2 + 128U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)128U;
+  uint8_t *last11 = last3 + 128U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } };
-  Hacl_Impl_SHA2_Types_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
-  Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
-  Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
+  Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
+  Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst;
+  Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd;
   sha512_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha512_update4(last1, hash);
     return;
@@ -2329,7 +2329,7 @@ sha512_update_last4(
 }
 
 static inline void
-sha512_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_4p h)
+sha512_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_4p h)
 {
   uint8_t hbuf[256U] = { 0U };
   Lib_IntVector_Intrinsics_vec256 v00 = st[0U];
@@ -2389,18 +2389,18 @@ sha512_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * 32U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)64U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)64U, (uint32_t)64U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)128U, (uint32_t)64U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)192U, (uint32_t)64U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 64U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 64U, 64U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 128U, 64U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 192U, 64U * sizeof (uint8_t));
 }
 
 void
@@ -2416,16 +2416,16 @@ Hacl_SHA2_Vec256_sha512_4(
   uint8_t *input3
 )
 {
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha512_init4(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
   sha512_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
@@ -2434,7 +2434,7 @@ Hacl_SHA2_Vec256_sha512_4(
   uint8_t *bl1 = b1 + input_len - rem1;
   uint8_t *bl2 = b2 + input_len - rem1;
   uint8_t *bl3 = b3 + input_len - rem1;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
   sha512_update_last4(len_, rem, lb, st);
   sha512_finish4(st, rb);
diff --git a/src/Hacl_Salsa20.c b/src/Hacl_Salsa20.c
index 2758f8a4..151df07d 100644
--- a/src/Hacl_Salsa20.c
+++ b/src/Hacl_Salsa20.c
@@ -30,35 +30,35 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t sta = st[b];
   uint32_t stb0 = st[a];
   uint32_t std0 = st[d];
-  uint32_t sta1 = sta ^ ((stb0 + std0) << (uint32_t)7U | (stb0 + std0) >> (uint32_t)25U);
+  uint32_t sta1 = sta ^ ((stb0 + std0) << 7U | (stb0 + std0) >> 25U);
   st[b] = sta1;
   uint32_t sta0 = st[c];
   uint32_t stb1 = st[b];
   uint32_t std1 = st[a];
-  uint32_t sta10 = sta0 ^ ((stb1 + std1) << (uint32_t)9U | (stb1 + std1) >> (uint32_t)23U);
+  uint32_t sta10 = sta0 ^ ((stb1 + std1) << 9U | (stb1 + std1) >> 23U);
   st[c] = sta10;
   uint32_t sta2 = st[d];
   uint32_t stb2 = st[c];
   uint32_t std2 = st[b];
-  uint32_t sta11 = sta2 ^ ((stb2 + std2) << (uint32_t)13U | (stb2 + std2) >> (uint32_t)19U);
+  uint32_t sta11 = sta2 ^ ((stb2 + std2) << 13U | (stb2 + std2) >> 19U);
   st[d] = sta11;
   uint32_t sta3 = st[a];
   uint32_t stb = st[d];
   uint32_t std = st[c];
-  uint32_t sta12 = sta3 ^ ((stb + std) << (uint32_t)18U | (stb + std) >> (uint32_t)14U);
+  uint32_t sta12 = sta3 ^ ((stb + std) << 18U | (stb + std) >> 14U);
   st[a] = sta12;
 }
 
 static inline void double_round(uint32_t *st)
 {
-  quarter_round(st, (uint32_t)0U, (uint32_t)4U, (uint32_t)8U, (uint32_t)12U);
-  quarter_round(st, (uint32_t)5U, (uint32_t)9U, (uint32_t)13U, (uint32_t)1U);
-  quarter_round(st, (uint32_t)10U, (uint32_t)14U, (uint32_t)2U, (uint32_t)6U);
-  quarter_round(st, (uint32_t)15U, (uint32_t)3U, (uint32_t)7U, (uint32_t)11U);
-  quarter_round(st, (uint32_t)0U, (uint32_t)1U, (uint32_t)2U, (uint32_t)3U);
-  quarter_round(st, (uint32_t)5U, (uint32_t)6U, (uint32_t)7U, (uint32_t)4U);
-  quarter_round(st, (uint32_t)10U, (uint32_t)11U, (uint32_t)8U, (uint32_t)9U);
-  quarter_round(st, (uint32_t)15U, (uint32_t)12U, (uint32_t)13U, (uint32_t)14U);
+  quarter_round(st, 0U, 4U, 8U, 12U);
+  quarter_round(st, 5U, 9U, 13U, 1U);
+  quarter_round(st, 10U, 14U, 2U, 6U);
+  quarter_round(st, 15U, 3U, 7U, 11U);
+  quarter_round(st, 0U, 1U, 2U, 3U);
+  quarter_round(st, 5U, 6U, 7U, 4U);
+  quarter_round(st, 10U, 11U, 8U, 9U);
+  quarter_round(st, 15U, 12U, 13U, 14U);
 }
 
 static inline void rounds(uint32_t *st)
@@ -77,14 +77,14 @@ static inline void rounds(uint32_t *st)
 
 static inline void salsa20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t));
+  memcpy(k, ctx, 16U * sizeof (uint32_t));
   uint32_t ctr_u32 = ctr;
   k[8U] = k[8U] + ctr_u32;
   rounds(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = k;
     uint32_t x = k[i] + ctx[i];
     os[i] = x;);
@@ -98,42 +98,38 @@ static inline void salsa20_key_block0(uint8_t *out, uint8_t *key, uint8_t *n)
   uint32_t k32[8U] = { 0U };
   uint32_t n32[2U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
-  ctx[0U] = (uint32_t)0x61707865U;
+  ctx[0U] = 0x61707865U;
   uint32_t *k0 = k32;
-  uint32_t *k1 = k32 + (uint32_t)4U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t));
-  ctx[8U] = (uint32_t)0U;
-  ctx[9U] = (uint32_t)0U;
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k1, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
-  salsa20_core(k, ctx, (uint32_t)0U);
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    store32_le(out + i * (uint32_t)4U, k[i]););
+  uint32_t *k1 = k32 + 4U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 2U * sizeof (uint32_t));
+  ctx[8U] = 0U;
+  ctx[9U] = 0U;
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k1, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
+  salsa20_core(k, ctx, 0U);
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(out + i * 4U, k[i]););
 }
 
 static inline void
@@ -150,101 +146,93 @@ salsa20_encrypt(
   uint32_t k32[8U] = { 0U };
   uint32_t n32[2U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
-  ctx[0U] = (uint32_t)0x61707865U;
+  ctx[0U] = 0x61707865U;
   uint32_t *k0 = k32;
-  uint32_t *k10 = k32 + (uint32_t)4U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t));
+  uint32_t *k10 = k32 + 4U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 2U * sizeof (uint32_t));
   ctx[8U] = ctr;
-  ctx[9U] = (uint32_t)0U;
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
+  ctx[9U] = 0U;
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k10, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
   uint32_t k[16U] = { 0U };
-  KRML_HOST_IGNORE(k);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  KRML_MAYBE_UNUSED_VAR(k);
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = text + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = text + i0 * 64U;
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, i0);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = uu____1 + i * (uint32_t)4U;
+      uint8_t *bj = uu____1 + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(uu____0 + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(uu____0 + i * 4U, bl[i]););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, nb);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = plain + i * (uint32_t)4U;
+      uint8_t *bj = plain + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(plain + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(plain + i * 4U, bl[i]););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -263,101 +251,93 @@ salsa20_decrypt(
   uint32_t k32[8U] = { 0U };
   uint32_t n32[2U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
-  ctx[0U] = (uint32_t)0x61707865U;
+  ctx[0U] = 0x61707865U;
   uint32_t *k0 = k32;
-  uint32_t *k10 = k32 + (uint32_t)4U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t));
+  uint32_t *k10 = k32 + 4U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 2U * sizeof (uint32_t));
   ctx[8U] = ctr;
-  ctx[9U] = (uint32_t)0U;
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
+  ctx[9U] = 0U;
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k10, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
   uint32_t k[16U] = { 0U };
-  KRML_HOST_IGNORE(k);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  KRML_MAYBE_UNUSED_VAR(k);
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = cipher + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = cipher + i0 * 64U;
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, i0);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = uu____1 + i * (uint32_t)4U;
+      uint8_t *bj = uu____1 + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(uu____0 + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(uu____0 + i * 4U, bl[i]););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, nb);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = plain + i * (uint32_t)4U;
+      uint8_t *bj = plain + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(plain + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(plain + i * 4U, bl[i]););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -368,34 +348,34 @@ static inline void hsalsa20(uint8_t *out, uint8_t *key, uint8_t *n)
   uint32_t k32[8U] = { 0U };
   uint32_t n32[4U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   uint32_t *k0 = k32;
-  uint32_t *k1 = k32 + (uint32_t)4U;
-  ctx[0U] = (uint32_t)0x61707865U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)4U * sizeof (uint32_t));
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k1, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
+  uint32_t *k1 = k32 + 4U;
+  ctx[0U] = 0x61707865U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 4U * sizeof (uint32_t));
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k1, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
   rounds(ctx);
   uint32_t r0 = ctx[0U];
   uint32_t r1 = ctx[5U];
@@ -406,11 +386,7 @@ static inline void hsalsa20(uint8_t *out, uint8_t *key, uint8_t *n)
   uint32_t r6 = ctx[8U];
   uint32_t r7 = ctx[9U];
   uint32_t res[8U] = { r0, r1, r2, r3, r4, r5, r6, r7 };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_le(out + i * (uint32_t)4U, res[i]););
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_le(out + i * 4U, res[i]););
 }
 
 void
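
The salsa20 hunks above all share one keystream layout: the input is split into nb = len / 64 full 64-byte blocks plus a rem = len % 64 tail; each full block is XORed against a freshly generated keystream block, and the tail is staged through a zero-padded 64-byte buffer so the same per-block path can be reused. Below is a minimal sketch of that split, with an invented keystream_block() standing in for salsa20_core plus its little-endian load/store loops (the placeholder body is NOT the real Salsa20 core).

#include <stdint.h>
#include <string.h>

/* Invented stand-in for salsa20_core plus its load/store loops;
   the body below is a placeholder, NOT the real Salsa20 core. */
void keystream_block(uint8_t block[64U], const uint32_t ctx[16U], uint32_t ctr)
{
  for (uint32_t j = 0U; j < 64U; j++)
    block[j] = (uint8_t)((ctx[j % 16U] >> ((j / 16U) * 8U)) ^ ctr);
}

/* Block/remainder split used by the encrypt/decrypt hunks above. */
void xor_keystream(uint8_t *out, const uint8_t *in, uint32_t len, const uint32_t ctx[16U])
{
  uint32_t nb = len / 64U;    /* number of full 64-byte blocks */
  uint32_t rem = len % 64U;   /* bytes left in the final block */
  for (uint32_t i = 0U; i < nb; i++)
  {
    uint8_t ks[64U];
    keystream_block(ks, ctx, i);   /* block offset, as in salsa20_core(k1, ctx, i0) */
    for (uint32_t j = 0U; j < 64U; j++)
      out[i * 64U + j] = (uint8_t)(in[i * 64U + j] ^ ks[j]);
  }
  if (rem > 0U)
  {
    uint8_t last[64U] = { 0U };  /* zero-padded staging buffer */
    uint8_t ks[64U];
    memcpy(last, in + nb * 64U, rem);
    keystream_block(ks, ctx, nb);
    for (uint32_t j = 0U; j < rem; j++)
      last[j] = (uint8_t)(last[j] ^ ks[j]);
    memcpy(out + nb * 64U, last, rem);
  }
}

Only rem bytes are ever copied back out of the staging buffer, which is what the final memcpy(uu____2, plain, rem * sizeof (uint8_t)) in both hunks does.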
diff --git a/src/Hacl_Streaming_Blake2.c b/src/Hacl_Streaming_Blake2.c
deleted file mode 100644
index 948d56c2..00000000
--- a/src/Hacl_Streaming_Blake2.c
+++ /dev/null
@@ -1,655 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Streaming_Blake2.h"
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2_blake2s_32_state *Hacl_Streaming_Blake2_blake2s_32_no_key_create_in(void)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
-  uint32_t *b = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
-  Hacl_Streaming_Blake2_blake2s_32_block_state block_state = { .fst = wv, .snd = b };
-  Hacl_Streaming_Blake2_blake2s_32_state
-  s1 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  Hacl_Streaming_Blake2_blake2s_32_state
-  *p =
-    (Hacl_Streaming_Blake2_blake2s_32_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Blake2_blake2s_32_state
-      ));
-  p[0U] = s1;
-  Hacl_Blake2s_32_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
-  return p;
-}
-
-/**
-  (Re-)initialization function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2s_32_no_key_init(Hacl_Streaming_Blake2_blake2s_32_state *s1)
-{
-  Hacl_Streaming_Blake2_blake2s_32_state scrut = *s1;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2_blake2s_32_block_state block_state = scrut.block_state;
-  Hacl_Blake2s_32_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
-  Hacl_Streaming_Blake2_blake2s_32_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s1[0U] = tmp;
-}
-
-/**
-  Update function when there is no key; 0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2_blake2s_32_no_key_update(
-  Hacl_Streaming_Blake2_blake2s_32_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Blake2_blake2s_32_state s1 = *p;
-  uint64_t total_len = s1.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)64U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
-  }
-  if (len <= (uint32_t)64U - sz)
-  {
-    Hacl_Streaming_Blake2_blake2s_32_state s2 = *p;
-    Hacl_Streaming_Blake2_blake2s_32_block_state block_state1 = s2.block_state;
-    uint8_t *buf = s2.buf;
-    uint64_t total_len1 = s2.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2s_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Blake2_blake2s_32_state s2 = *p;
-    Hacl_Streaming_Blake2_blake2s_32_block_state block_state1 = s2.block_state;
-    uint8_t *buf = s2.buf;
-    uint64_t total_len1 = s2.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      uint32_t *wv = block_state1.fst;
-      uint32_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)64U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    uint32_t *wv = block_state1.fst;
-    uint32_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
-    Hacl_Blake2s_32_blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2s_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)64U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Blake2_blake2s_32_state s2 = *p;
-    Hacl_Streaming_Blake2_blake2s_32_block_state block_state10 = s2.block_state;
-    uint8_t *buf0 = s2.buf;
-    uint64_t total_len10 = s2.total_len;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)64U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2s_32_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2
-        }
-      );
-    Hacl_Streaming_Blake2_blake2s_32_state s20 = *p;
-    Hacl_Streaming_Blake2_blake2s_32_block_state block_state1 = s20.block_state;
-    uint8_t *buf = s20.buf;
-    uint64_t total_len1 = s20.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      uint32_t *wv = block_state1.fst;
-      uint32_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)64U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    uint32_t *wv = block_state1.fst;
-    uint32_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
-    Hacl_Blake2s_32_blake2s_update_multi(data1_len, wv, hash, total_len1, data11, nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2s_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2_blake2s_32_no_key_finish(
-  Hacl_Streaming_Blake2_blake2s_32_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Blake2_blake2s_32_state scrut = *p;
-  Hacl_Streaming_Blake2_blake2s_32_block_state block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)64U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
-  }
-  uint8_t *buf_1 = buf_;
-  uint32_t wv0[16U] = { 0U };
-  uint32_t b[16U] = { 0U };
-  Hacl_Streaming_Blake2_blake2s_32_block_state tmp_block_state = { .fst = wv0, .snd = b };
-  uint32_t *src_b = block_state.snd;
-  uint32_t *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)16U * sizeof (uint32_t));
-  uint64_t prev_len = total_len - (uint64_t)r;
-  uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)64U;
-  }
-  else
-  {
-    ite = r % (uint32_t)64U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite;
-  uint8_t *buf_multi = buf_1;
-  uint32_t *wv1 = tmp_block_state.fst;
-  uint32_t *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2s_32_blake2s_update_multi((uint32_t)0U, wv1, hash0, prev_len, buf_multi, nb);
-  uint64_t prev_len_last = total_len - (uint64_t)r;
-  uint32_t *wv = tmp_block_state.fst;
-  uint32_t *hash = tmp_block_state.snd;
-  Hacl_Blake2s_32_blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last);
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, tmp_block_state.snd);
-}
-
-/**
-  Free state function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2s_32_no_key_free(Hacl_Streaming_Blake2_blake2s_32_state *s1)
-{
-  Hacl_Streaming_Blake2_blake2s_32_state scrut = *s1;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2_blake2s_32_block_state block_state = scrut.block_state;
-  uint32_t *wv = block_state.fst;
-  uint32_t *b = block_state.snd;
-  KRML_HOST_FREE(wv);
-  KRML_HOST_FREE(b);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s1);
-}
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2_blake2b_32_state *Hacl_Streaming_Blake2_blake2b_32_no_key_create_in(void)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
-  uint64_t *b = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
-  Hacl_Streaming_Blake2_blake2b_32_block_state block_state = { .fst = wv, .snd = b };
-  Hacl_Streaming_Blake2_blake2b_32_state
-  s1 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  Hacl_Streaming_Blake2_blake2b_32_state
-  *p =
-    (Hacl_Streaming_Blake2_blake2b_32_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Blake2_blake2b_32_state
-      ));
-  p[0U] = s1;
-  Hacl_Blake2b_32_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
-  return p;
-}
-
-/**
-  (Re)-initialization function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2b_32_no_key_init(Hacl_Streaming_Blake2_blake2b_32_state *s1)
-{
-  Hacl_Streaming_Blake2_blake2b_32_state scrut = *s1;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2_blake2b_32_block_state block_state = scrut.block_state;
-  Hacl_Blake2b_32_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
-  Hacl_Streaming_Blake2_blake2b_32_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s1[0U] = tmp;
-}
-
-/**
-  Update function when there is no key; 0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2_blake2b_32_no_key_update(
-  Hacl_Streaming_Blake2_blake2b_32_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Blake2_blake2b_32_state s1 = *p;
-  uint64_t total_len = s1.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)128U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
-  }
-  if (len <= (uint32_t)128U - sz)
-  {
-    Hacl_Streaming_Blake2_blake2b_32_state s2 = *p;
-    Hacl_Streaming_Blake2_blake2b_32_block_state block_state1 = s2.block_state;
-    uint8_t *buf = s2.buf;
-    uint64_t total_len1 = s2.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)128U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2b_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Blake2_blake2b_32_state s2 = *p;
-    Hacl_Streaming_Blake2_blake2b_32_block_state block_state1 = s2.block_state;
-    uint8_t *buf = s2.buf;
-    uint64_t total_len1 = s2.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)128U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      uint64_t *wv = block_state1.fst;
-      uint64_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
-        wv,
-        hash,
-        FStar_UInt128_uint64_to_uint128(prevlen),
-        buf,
-        nb);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)128U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    uint64_t *wv = block_state1.fst;
-    uint64_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
-    Hacl_Blake2b_32_blake2b_update_multi(data1_len,
-      wv,
-      hash,
-      FStar_UInt128_uint64_to_uint128(total_len1),
-      data1,
-      nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2b_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)128U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Blake2_blake2b_32_state s2 = *p;
-    Hacl_Streaming_Blake2_blake2b_32_block_state block_state10 = s2.block_state;
-    uint8_t *buf0 = s2.buf;
-    uint64_t total_len10 = s2.total_len;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)128U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2b_32_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2
-        }
-      );
-    Hacl_Streaming_Blake2_blake2b_32_state s20 = *p;
-    Hacl_Streaming_Blake2_blake2b_32_block_state block_state1 = s20.block_state;
-    uint8_t *buf = s20.buf;
-    uint64_t total_len1 = s20.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)128U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      uint64_t *wv = block_state1.fst;
-      uint64_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
-        wv,
-        hash,
-        FStar_UInt128_uint64_to_uint128(prevlen),
-        buf,
-        nb);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)128U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)128U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    uint64_t *wv = block_state1.fst;
-    uint64_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
-    Hacl_Blake2b_32_blake2b_update_multi(data1_len,
-      wv,
-      hash,
-      FStar_UInt128_uint64_to_uint128(total_len1),
-      data11,
-      nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2b_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2_blake2b_32_no_key_finish(
-  Hacl_Streaming_Blake2_blake2b_32_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Blake2_blake2b_32_state scrut = *p;
-  Hacl_Streaming_Blake2_blake2b_32_block_state block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)128U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
-  }
-  uint8_t *buf_1 = buf_;
-  uint64_t wv0[16U] = { 0U };
-  uint64_t b[16U] = { 0U };
-  Hacl_Streaming_Blake2_blake2b_32_block_state tmp_block_state = { .fst = wv0, .snd = b };
-  uint64_t *src_b = block_state.snd;
-  uint64_t *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)16U * sizeof (uint64_t));
-  uint64_t prev_len = total_len - (uint64_t)r;
-  uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)128U;
-  }
-  else
-  {
-    ite = r % (uint32_t)128U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite;
-  uint8_t *buf_multi = buf_1;
-  uint64_t *wv1 = tmp_block_state.fst;
-  uint64_t *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2b_32_blake2b_update_multi((uint32_t)0U,
-    wv1,
-    hash0,
-    FStar_UInt128_uint64_to_uint128(prev_len),
-    buf_multi,
-    nb);
-  uint64_t prev_len_last = total_len - (uint64_t)r;
-  uint64_t *wv = tmp_block_state.fst;
-  uint64_t *hash = tmp_block_state.snd;
-  Hacl_Blake2b_32_blake2b_update_last(r,
-    wv,
-    hash,
-    FStar_UInt128_uint64_to_uint128(prev_len_last),
-    r,
-    buf_last);
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, tmp_block_state.snd);
-}
-
-/**
-  Free state function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2b_32_no_key_free(Hacl_Streaming_Blake2_blake2b_32_state *s1)
-{
-  Hacl_Streaming_Blake2_blake2b_32_state scrut = *s1;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2_blake2b_32_block_state block_state = scrut.block_state;
-  uint64_t *wv = block_state.fst;
-  uint64_t *b = block_state.snd;
-  KRML_HOST_FREE(wv);
-  KRML_HOST_FREE(b);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s1);
-}
-
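
The no_key_update function deleted above is an instance of the standard streaming block-buffering scheme: input bytes accumulate in a one-block buffer, full blocks are fed to update_multi, and the trailing (possibly full) block is held back so the finish path always has a last block to pad and compress. Here is a compressed sketch of that control flow, with invented names (stream_state, compress_blocks, BLOCK = 64) standing in for the BLAKE2s-specific state and Hacl_Blake2s_32_blake2s_update_multi; the compression body is a placeholder, not BLAKE2.

#include <stdint.h>
#include <string.h>

#define BLOCK 64U

typedef struct
{
  uint32_t h[8U];        /* placeholder chaining state                    */
  uint8_t buf[BLOCK];    /* staging buffer for at most one block          */
  uint64_t total_len;    /* bytes absorbed so far (drives the fill level) */
}
stream_state;

/* Invented stand-in for blake2s_update_multi: folds nb full blocks into the
   state.  The body is NOT BLAKE2, just a placeholder to keep this runnable. */
void compress_blocks(stream_state *st, const uint8_t *blocks, uint32_t nb)
{
  for (uint32_t i = 0U; i < nb; i++)
    for (uint32_t j = 0U; j < BLOCK; j++)
      st->h[j % 8U] ^= (uint32_t)blocks[i * BLOCK + j] << ((j % 4U) * 8U);
}

/* 0 = success, 1 = max length exceeded, mirroring the deleted update above. */
int stream_update(stream_state *st, const uint8_t *data, uint32_t len)
{
  if ((uint64_t)len > UINT64_MAX - st->total_len)
    return 1;
  /* Bytes currently buffered: a completely full buffer is kept until more
     data arrives, so this is BLOCK (not 0) on non-empty block boundaries. */
  uint32_t sz =
    (st->total_len % BLOCK == 0U && st->total_len > 0U)
      ? BLOCK
      : (uint32_t)(st->total_len % BLOCK);
  if (len <= BLOCK - sz)
  {
    /* Everything fits next to the bytes already buffered: just append. */
    memcpy(st->buf + sz, data, len);
    st->total_len += len;
    return 0;
  }
  if (sz > 0U)
  {
    /* Top the buffer up to a full block (diff may be 0) and compress it. */
    uint32_t diff = BLOCK - sz;
    memcpy(st->buf + sz, data, diff);
    compress_blocks(st, st->buf, 1U);
    data += diff;
    len -= diff;
    st->total_len += diff;
  }
  /* Compress whole blocks straight from the input, but hold the last block
     back in buf when the input ends exactly on a block boundary, so the
     finish path always has a final (possibly partial) block to process. */
  uint32_t tail = (len % BLOCK == 0U && len > 0U) ? BLOCK : len % BLOCK;
  uint32_t nb = (len - tail) / BLOCK;
  compress_blocks(st, data, nb);
  memcpy(st->buf, data + nb * BLOCK, tail);
  st->total_len += len;
  return 0;
}

The lazy treatment of a completely full buffer (sz = BLOCK rather than 0 on non-empty block boundaries) is what the repeated total_len % 64 == 0 && total_len > 0 tests in the deleted code implement.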
diff --git a/src/Hacl_Streaming_Blake2b_256.c b/src/Hacl_Streaming_Blake2b_256.c
deleted file mode 100644
index bdb5433f..00000000
--- a/src/Hacl_Streaming_Blake2b_256.c
+++ /dev/null
@@ -1,371 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Streaming_Blake2b_256.h"
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2b_256_blake2b_256_state
-*Hacl_Streaming_Blake2b_256_blake2b_256_no_key_create_in(void)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  Lib_IntVector_Intrinsics_vec256
-  *wv =
-    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U);
-  memset(wv, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Lib_IntVector_Intrinsics_vec256
-  *b =
-    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U);
-  memset(b, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = { .fst = wv, .snd = b };
-  Hacl_Streaming_Blake2b_256_blake2b_256_state
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  Hacl_Streaming_Blake2b_256_blake2b_256_state
-  *p =
-    (Hacl_Streaming_Blake2b_256_blake2b_256_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Blake2b_256_blake2b_256_state
-      ));
-  p[0U] = s;
-  Hacl_Blake2b_256_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
-  return p;
-}
-
-/**
-  (Re-)initialization function when there is no key
-*/
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_init(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *s
-)
-{
-  Hacl_Streaming_Blake2b_256_blake2b_256_state scrut = *s;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = scrut.block_state;
-  Hacl_Blake2b_256_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
-  Hacl_Streaming_Blake2b_256_blake2b_256_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
-}
-
-/**
-  Update function when there is no key; 0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Blake2b_256_blake2b_256_state s = *p;
-  uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)128U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
-  }
-  if (len <= (uint32_t)128U - sz)
-  {
-    Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p;
-    Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)128U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2b_256_blake2b_256_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p;
-    Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)128U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
-      Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
-        wv,
-        hash,
-        FStar_UInt128_uint64_to_uint128(prevlen),
-        buf,
-        nb);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)128U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
-    Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
-    Hacl_Blake2b_256_blake2b_update_multi(data1_len,
-      wv,
-      hash,
-      FStar_UInt128_uint64_to_uint128(total_len1),
-      data1,
-      nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2b_256_blake2b_256_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)128U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p;
-    Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state10 = s1.block_state;
-    uint8_t *buf0 = s1.buf;
-    uint64_t total_len10 = s1.total_len;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)128U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2b_256_blake2b_256_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2
-        }
-      );
-    Hacl_Streaming_Blake2b_256_blake2b_256_state s10 = *p;
-    Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state1 = s10.block_state;
-    uint8_t *buf = s10.buf;
-    uint64_t total_len1 = s10.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)128U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
-      Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
-        wv,
-        hash,
-        FStar_UInt128_uint64_to_uint128(prevlen),
-        buf,
-        nb);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)128U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)128U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
-    Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
-    Hacl_Blake2b_256_blake2b_update_multi(data1_len,
-      wv,
-      hash,
-      FStar_UInt128_uint64_to_uint128(total_len1),
-      data11,
-      nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2b_256_blake2b_256_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Blake2b_256_blake2b_256_state scrut = *p;
-  Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)128U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
-  }
-  uint8_t *buf_1 = buf_;
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Streaming_Blake2b_256_blake2b_256_block_state tmp_block_state = { .fst = wv0, .snd = b };
-  Lib_IntVector_Intrinsics_vec256 *src_b = block_state.snd;
-  Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  uint64_t prev_len = total_len - (uint64_t)r;
-  uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)128U;
-  }
-  else
-  {
-    ite = r % (uint32_t)128U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite;
-  uint8_t *buf_multi = buf_1;
-  Lib_IntVector_Intrinsics_vec256 *wv1 = tmp_block_state.fst;
-  Lib_IntVector_Intrinsics_vec256 *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2b_256_blake2b_update_multi((uint32_t)0U,
-    wv1,
-    hash0,
-    FStar_UInt128_uint64_to_uint128(prev_len),
-    buf_multi,
-    nb);
-  uint64_t prev_len_last = total_len - (uint64_t)r;
-  Lib_IntVector_Intrinsics_vec256 *wv = tmp_block_state.fst;
-  Lib_IntVector_Intrinsics_vec256 *hash = tmp_block_state.snd;
-  Hacl_Blake2b_256_blake2b_update_last(r,
-    wv,
-    hash,
-    FStar_UInt128_uint64_to_uint128(prev_len_last),
-    r,
-    buf_last);
-  Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst, tmp_block_state.snd);
-}
-
-/**
-  Free state function when there is no key
-*/
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_free(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *s
-)
-{
-  Hacl_Streaming_Blake2b_256_blake2b_256_state scrut = *s;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = scrut.block_state;
-  Lib_IntVector_Intrinsics_vec256 *wv = block_state.fst;
-  Lib_IntVector_Intrinsics_vec256 *b = block_state.snd;
-  KRML_ALIGNED_FREE(wv);
-  KRML_ALIGNED_FREE(b);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
-}
-
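
The vectorized state deleted above is allocated with KRML_ALIGNED_MALLOC(32, ...) and released with KRML_ALIGNED_FREE, keeping the Lib_IntVector_Intrinsics_vec256 working vectors 32-byte aligned on the heap, presumably so that aligned 256-bit loads and stores can be used. A minimal portable C11 illustration of the same allocate/zero/free pattern, using aligned_alloc/free and an invented 32-byte-aligned stand-in type rather than the HACL* intrinsics:

#include <stdalign.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Invented stand-in for Lib_IntVector_Intrinsics_vec256: four 64-bit lanes,
   32-byte aligned, so a row of four of them mimics the deleted block state. */
typedef struct { alignas(32) uint64_t lanes[4U]; } vec256_like;

int main(void)
{
  /* aligned_alloc needs the size to be a multiple of the alignment:
     4 * sizeof(vec256_like) = 128 bytes, a multiple of 32. */
  vec256_like *wv = aligned_alloc(32U, 4U * sizeof (vec256_like));
  vec256_like *b = aligned_alloc(32U, 4U * sizeof (vec256_like));
  if (wv == NULL || b == NULL)
  {
    free(wv);
    free(b);
    return 1;
  }
  memset(wv, 0, 4U * sizeof (vec256_like));
  memset(b, 0, 4U * sizeof (vec256_like));
  /* ... wv plays the role of the working vector, b the chaining state ... */
  free(wv);   /* memory from aligned_alloc is released with plain free */
  free(b);
  return 0;
}

Memory from C11 aligned_alloc is released with ordinary free; a dedicated KRML_ALIGNED_FREE exists presumably because some toolchains pair aligned allocation with a matching aligned free.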
diff --git a/src/Hacl_Streaming_Blake2s_128.c b/src/Hacl_Streaming_Blake2s_128.c
deleted file mode 100644
index f97bf5d0..00000000
--- a/src/Hacl_Streaming_Blake2s_128.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Streaming_Blake2s_128.h"
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2s_128_blake2s_128_state
-*Hacl_Streaming_Blake2s_128_blake2s_128_no_key_create_in(void)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  Lib_IntVector_Intrinsics_vec128
-  *wv =
-    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U);
-  memset(wv, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  Lib_IntVector_Intrinsics_vec128
-  *b =
-    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U);
-  memset(b, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = { .fst = wv, .snd = b };
-  Hacl_Streaming_Blake2s_128_blake2s_128_state
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  Hacl_Streaming_Blake2s_128_blake2s_128_state
-  *p =
-    (Hacl_Streaming_Blake2s_128_blake2s_128_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Blake2s_128_blake2s_128_state
-      ));
-  p[0U] = s;
-  Hacl_Blake2s_128_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
-  return p;
-}
-
-/**
-  (Re-)initialization function when there is no key
-*/
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_init(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *s
-)
-{
-  Hacl_Streaming_Blake2s_128_blake2s_128_state scrut = *s;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = scrut.block_state;
-  Hacl_Blake2s_128_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
-  Hacl_Streaming_Blake2s_128_blake2s_128_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
-}
-
-/**
-  Update function when there is no key; 0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Blake2s_128_blake2s_128_state s = *p;
-  uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)64U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
-  }
-  if (len <= (uint32_t)64U - sz)
-  {
-    Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p;
-    Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2s_128_blake2s_128_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p;
-    Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
-      Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)64U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
-    Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
-    Hacl_Blake2s_128_blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2s_128_blake2s_128_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)64U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p;
-    Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state10 = s1.block_state;
-    uint8_t *buf0 = s1.buf;
-    uint64_t total_len10 = s1.total_len;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)64U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2s_128_blake2s_128_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2
-        }
-      );
-    Hacl_Streaming_Blake2s_128_blake2s_128_state s10 = *p;
-    Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state1 = s10.block_state;
-    uint8_t *buf = s10.buf;
-    uint64_t total_len1 = s10.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
-      Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)64U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
-    Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
-    Hacl_Blake2s_128_blake2s_update_multi(data1_len, wv, hash, total_len1, data11, nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2s_128_blake2s_128_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_finish(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Blake2s_128_blake2s_128_state scrut = *p;
-  Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)64U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
-  }
-  uint8_t *buf_1 = buf_;
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Streaming_Blake2s_128_blake2s_128_block_state tmp_block_state = { .fst = wv0, .snd = b };
-  Lib_IntVector_Intrinsics_vec128 *src_b = block_state.snd;
-  Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  uint64_t prev_len = total_len - (uint64_t)r;
-  uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)64U;
-  }
-  else
-  {
-    ite = r % (uint32_t)64U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite;
-  uint8_t *buf_multi = buf_1;
-  Lib_IntVector_Intrinsics_vec128 *wv1 = tmp_block_state.fst;
-  Lib_IntVector_Intrinsics_vec128 *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2s_128_blake2s_update_multi((uint32_t)0U, wv1, hash0, prev_len, buf_multi, nb);
-  uint64_t prev_len_last = total_len - (uint64_t)r;
-  Lib_IntVector_Intrinsics_vec128 *wv = tmp_block_state.fst;
-  Lib_IntVector_Intrinsics_vec128 *hash = tmp_block_state.snd;
-  Hacl_Blake2s_128_blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last);
-  Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst, tmp_block_state.snd);
-}
-
-/**
-  Free state function when there is no key
-*/
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_free(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *s
-)
-{
-  Hacl_Streaming_Blake2s_128_blake2s_128_state scrut = *s;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = scrut.block_state;
-  Lib_IntVector_Intrinsics_vec128 *wv = block_state.fst;
-  Lib_IntVector_Intrinsics_vec128 *b = block_state.snd;
-  KRML_ALIGNED_FREE(wv);
-  KRML_ALIGNED_FREE(b);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
-}
-
diff --git a/src/Hacl_Streaming_Poly1305_128.c b/src/Hacl_Streaming_Poly1305_128.c
deleted file mode 100644
index c3f7c19a..00000000
--- a/src/Hacl_Streaming_Poly1305_128.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Streaming_Poly1305_128.h"
-
-Hacl_Streaming_Poly1305_128_poly1305_128_state
-*Hacl_Streaming_Poly1305_128_create_in(uint8_t *k)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  Lib_IntVector_Intrinsics_vec128
-  *r1 =
-    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)25U);
-  memset(r1, 0U, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  Lib_IntVector_Intrinsics_vec128 *block_state = r1;
-  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
-  uint8_t *k_0 = k_;
-  Hacl_Streaming_Poly1305_128_poly1305_128_state
-  s =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_0 };
-  Hacl_Streaming_Poly1305_128_poly1305_128_state
-  *p =
-    (Hacl_Streaming_Poly1305_128_poly1305_128_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Poly1305_128_poly1305_128_state
-      ));
-  p[0U] = s;
-  Hacl_Poly1305_128_poly1305_init(block_state, k);
-  return p;
-}
-
-void
-Hacl_Streaming_Poly1305_128_init(uint8_t *k, Hacl_Streaming_Poly1305_128_poly1305_128_state *s)
-{
-  Hacl_Streaming_Poly1305_128_poly1305_128_state scrut = *s;
-  uint8_t *k_ = scrut.p_key;
-  uint8_t *buf = scrut.buf;
-  Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
-  Hacl_Poly1305_128_poly1305_init(block_state, k);
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
-  uint8_t *k_1 = k_;
-  Hacl_Streaming_Poly1305_128_poly1305_128_state
-  tmp =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_1 };
-  s[0U] = tmp;
-}
-
-/**
-0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Poly1305_128_update(
-  Hacl_Streaming_Poly1305_128_poly1305_128_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Poly1305_128_poly1305_128_state s = *p;
-  uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)32U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)32U);
-  }
-  if (len <= (uint32_t)32U - sz)
-  {
-    Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p;
-    Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)32U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_128_poly1305_128_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2,
-          .p_key = k_1
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p;
-    Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)32U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      Hacl_Poly1305_128_poly1305_update(block_state1, (uint32_t)32U, buf);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)32U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)32U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)32U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)32U;
-    uint32_t data1_len = n_blocks * (uint32_t)32U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_Poly1305_128_poly1305_update(block_state1, data1_len, data1);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_128_poly1305_128_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len,
-          .p_key = k_1
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)32U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p;
-    Lib_IntVector_Intrinsics_vec128 *block_state10 = s1.block_state;
-    uint8_t *buf0 = s1.buf;
-    uint64_t total_len10 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)32U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)32U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_128_poly1305_128_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2,
-          .p_key = k_1
-        }
-      );
-    Hacl_Streaming_Poly1305_128_poly1305_128_state s10 = *p;
-    Lib_IntVector_Intrinsics_vec128 *block_state1 = s10.block_state;
-    uint8_t *buf = s10.buf;
-    uint64_t total_len1 = s10.total_len;
-    uint8_t *k_10 = s10.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)32U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      Hacl_Poly1305_128_poly1305_update(block_state1, (uint32_t)32U, buf);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)32U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)32U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)32U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)32U;
-    uint32_t data1_len = n_blocks * (uint32_t)32U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_Poly1305_128_poly1305_update(block_state1, data1_len, data11);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_128_poly1305_128_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff),
-          .p_key = k_10
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-void
-Hacl_Streaming_Poly1305_128_finish(
-  Hacl_Streaming_Poly1305_128_poly1305_128_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Poly1305_128_poly1305_128_state scrut = *p;
-  Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint8_t *k_ = scrut.p_key;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)32U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)32U);
-  }
-  uint8_t *buf_1 = buf_;
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 r1[25U] KRML_POST_ALIGN(16) = { 0U };
-  Lib_IntVector_Intrinsics_vec128 *tmp_block_state = r1;
-  memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  uint32_t ite0;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite0 = (uint32_t)16U;
-  }
-  else
-  {
-    ite0 = r % (uint32_t)16U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite0;
-  uint8_t *buf_multi = buf_1;
-  uint32_t ite;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)16U;
-  }
-  else
-  {
-    ite = r % (uint32_t)16U;
-  }
-  Hacl_Poly1305_128_poly1305_update(tmp_block_state, r - ite, buf_multi);
-  uint32_t ite1;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite1 = (uint32_t)16U;
-  }
-  else
-  {
-    ite1 = r % (uint32_t)16U;
-  }
-  KRML_HOST_IGNORE(total_len - (uint64_t)ite1);
-  uint32_t ite2;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite2 = (uint32_t)16U;
-  }
-  else
-  {
-    ite2 = r % (uint32_t)16U;
-  }
-  Hacl_Poly1305_128_poly1305_update(tmp_block_state, ite2, buf_last);
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 tmp[25U] KRML_POST_ALIGN(16) = { 0U };
-  memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  Hacl_Poly1305_128_poly1305_finish(dst, k_, tmp);
-}
-
-void Hacl_Streaming_Poly1305_128_free(Hacl_Streaming_Poly1305_128_poly1305_128_state *s)
-{
-  Hacl_Streaming_Poly1305_128_poly1305_128_state scrut = *s;
-  uint8_t *k_ = scrut.p_key;
-  uint8_t *buf = scrut.buf;
-  Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
-  KRML_HOST_FREE(k_);
-  KRML_ALIGNED_FREE(block_state);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
-}
-
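Note: for readers unfamiliar with the streaming wrapper deleted above, the intended call sequence was create_in, any number of update calls, finish, free. A minimal sketch driving that removed API end to end; the include reflects the pre-patch header layout and the local names are hypothetical, error handling is reduced to the success check shown.

#include <stdint.h>
#include "Hacl_Streaming_Poly1305_128.h"   /* pre-patch header deleted by this change (assumption) */

/* Sketch only: feeds a message in two arbitrary-sized chunks through the removed streaming API. */
static void poly1305_128_streaming_example(uint8_t *key32, uint8_t *msg, uint32_t msg_len, uint8_t *tag16)
{
  Hacl_Streaming_Poly1305_128_poly1305_128_state *st =
    Hacl_Streaming_Poly1305_128_create_in(key32);               /* copies the key, allocates buf and block state */
  uint32_t half = msg_len / 2U;
  Hacl_Streaming_Types_error_code e0 =
    Hacl_Streaming_Poly1305_128_update(st, msg, half);          /* chunk sizes are arbitrary */
  Hacl_Streaming_Types_error_code e1 =
    Hacl_Streaming_Poly1305_128_update(st, msg + half, msg_len - half);
  if (e0 == Hacl_Streaming_Types_Success && e1 == Hacl_Streaming_Types_Success)
  {
    Hacl_Streaming_Poly1305_128_finish(st, tag16);              /* writes the 16-byte tag, state stays usable */
  }
  Hacl_Streaming_Poly1305_128_free(st);
}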
diff --git a/src/Hacl_Streaming_Poly1305_256.c b/src/Hacl_Streaming_Poly1305_256.c
deleted file mode 100644
index e56275a4..00000000
--- a/src/Hacl_Streaming_Poly1305_256.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Streaming_Poly1305_256.h"
-
-Hacl_Streaming_Poly1305_256_poly1305_256_state
-*Hacl_Streaming_Poly1305_256_create_in(uint8_t *k)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  Lib_IntVector_Intrinsics_vec256
-  *r1 =
-    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)25U);
-  memset(r1, 0U, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Lib_IntVector_Intrinsics_vec256 *block_state = r1;
-  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
-  uint8_t *k_0 = k_;
-  Hacl_Streaming_Poly1305_256_poly1305_256_state
-  s =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_0 };
-  Hacl_Streaming_Poly1305_256_poly1305_256_state
-  *p =
-    (Hacl_Streaming_Poly1305_256_poly1305_256_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Poly1305_256_poly1305_256_state
-      ));
-  p[0U] = s;
-  Hacl_Poly1305_256_poly1305_init(block_state, k);
-  return p;
-}
-
-void
-Hacl_Streaming_Poly1305_256_init(uint8_t *k, Hacl_Streaming_Poly1305_256_poly1305_256_state *s)
-{
-  Hacl_Streaming_Poly1305_256_poly1305_256_state scrut = *s;
-  uint8_t *k_ = scrut.p_key;
-  uint8_t *buf = scrut.buf;
-  Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
-  Hacl_Poly1305_256_poly1305_init(block_state, k);
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
-  uint8_t *k_1 = k_;
-  Hacl_Streaming_Poly1305_256_poly1305_256_state
-  tmp =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_1 };
-  s[0U] = tmp;
-}
-
-/**
-0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Poly1305_256_update(
-  Hacl_Streaming_Poly1305_256_poly1305_256_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Poly1305_256_poly1305_256_state s = *p;
-  uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)64U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
-  }
-  if (len <= (uint32_t)64U - sz)
-  {
-    Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p;
-    Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_256_poly1305_256_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2,
-          .p_key = k_1
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p;
-    Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      Hacl_Poly1305_256_poly1305_update(block_state1, (uint32_t)64U, buf);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)64U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_Poly1305_256_poly1305_update(block_state1, data1_len, data1);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_256_poly1305_256_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len,
-          .p_key = k_1
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)64U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p;
-    Lib_IntVector_Intrinsics_vec256 *block_state10 = s1.block_state;
-    uint8_t *buf0 = s1.buf;
-    uint64_t total_len10 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)64U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_256_poly1305_256_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2,
-          .p_key = k_1
-        }
-      );
-    Hacl_Streaming_Poly1305_256_poly1305_256_state s10 = *p;
-    Lib_IntVector_Intrinsics_vec256 *block_state1 = s10.block_state;
-    uint8_t *buf = s10.buf;
-    uint64_t total_len1 = s10.total_len;
-    uint8_t *k_10 = s10.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      Hacl_Poly1305_256_poly1305_update(block_state1, (uint32_t)64U, buf);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)64U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_Poly1305_256_poly1305_update(block_state1, data1_len, data11);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_256_poly1305_256_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff),
-          .p_key = k_10
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-void
-Hacl_Streaming_Poly1305_256_finish(
-  Hacl_Streaming_Poly1305_256_poly1305_256_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Poly1305_256_poly1305_256_state scrut = *p;
-  Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint8_t *k_ = scrut.p_key;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)64U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
-  }
-  uint8_t *buf_1 = buf_;
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 r1[25U] KRML_POST_ALIGN(32) = { 0U };
-  Lib_IntVector_Intrinsics_vec256 *tmp_block_state = r1;
-  memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  uint32_t ite0;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite0 = (uint32_t)16U;
-  }
-  else
-  {
-    ite0 = r % (uint32_t)16U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite0;
-  uint8_t *buf_multi = buf_1;
-  uint32_t ite;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)16U;
-  }
-  else
-  {
-    ite = r % (uint32_t)16U;
-  }
-  Hacl_Poly1305_256_poly1305_update(tmp_block_state, r - ite, buf_multi);
-  uint32_t ite1;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite1 = (uint32_t)16U;
-  }
-  else
-  {
-    ite1 = r % (uint32_t)16U;
-  }
-  KRML_HOST_IGNORE(total_len - (uint64_t)ite1);
-  uint32_t ite2;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite2 = (uint32_t)16U;
-  }
-  else
-  {
-    ite2 = r % (uint32_t)16U;
-  }
-  Hacl_Poly1305_256_poly1305_update(tmp_block_state, ite2, buf_last);
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 tmp[25U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Hacl_Poly1305_256_poly1305_finish(dst, k_, tmp);
-}
-
-void Hacl_Streaming_Poly1305_256_free(Hacl_Streaming_Poly1305_256_poly1305_256_state *s)
-{
-  Hacl_Streaming_Poly1305_256_poly1305_256_state scrut = *s;
-  uint8_t *k_ = scrut.p_key;
-  uint8_t *buf = scrut.buf;
-  Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
-  KRML_HOST_FREE(k_);
-  KRML_ALIGNED_FREE(block_state);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
-}
-
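The update paths above (and in the other two width variants) follow one buffering discipline: at most one block is kept in the internal buffer, and a fully buffered block is only compressed lazily, when more data arrives or at finish time. A simplified model of that chunk-splitting arithmetic for the 64-byte block size of this variant; the struct and helper names are hypothetical and flushing of the buffered block is described only in comments.

#include <stdint.h>

typedef struct { uint32_t copy_now; uint32_t full_blocks; uint32_t leftover; } split_t;

/* Model of Hacl_Streaming_Poly1305_256_update's length bookkeeping (sketch, not the real code). */
static split_t split_update(uint64_t total_len, uint32_t len)
{
  /* Bytes already buffered: total_len % 64, except that a nonzero multiple of 64
   * means the buffer still holds one full, not-yet-compressed block. */
  uint32_t sz = (total_len > 0U && total_len % 64U == 0U) ? 64U : (uint32_t)(total_len % 64U);
  split_t r = { 0U, 0U, 0U };
  if (len <= 64U - sz) { r.copy_now = len; return r; }       /* case 1: everything fits in the buffer        */
  uint32_t diff = (sz == 0U) ? 0U : 64U - sz;                /* case 3 first tops the buffer up (then flushes) */
  uint32_t rem = len - diff;
  uint32_t tail = (rem > 0U && rem % 64U == 0U) ? 64U : rem % 64U;
  r.copy_now = diff;                                         /* copied into the buffer before flushing        */
  r.full_blocks = (rem - tail) / 64U;                        /* compressed directly from the caller's data    */
  r.leftover = tail;                                         /* copied back into the buffer (may be 64)       */
  return r;
}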
diff --git a/src/Hacl_Streaming_Poly1305_32.c b/src/Hacl_Streaming_Poly1305_32.c
deleted file mode 100644
index 249a622f..00000000
--- a/src/Hacl_Streaming_Poly1305_32.c
+++ /dev/null
@@ -1,308 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Streaming_Poly1305_32.h"
-
-Hacl_Streaming_Poly1305_32_poly1305_32_state *Hacl_Streaming_Poly1305_32_create_in(uint8_t *k)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint8_t));
-  uint64_t *r1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
-  uint64_t *block_state = r1;
-  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
-  uint8_t *k_0 = k_;
-  Hacl_Streaming_Poly1305_32_poly1305_32_state
-  s =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_0 };
-  Hacl_Streaming_Poly1305_32_poly1305_32_state
-  *p =
-    (Hacl_Streaming_Poly1305_32_poly1305_32_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Poly1305_32_poly1305_32_state
-      ));
-  p[0U] = s;
-  Hacl_Poly1305_32_poly1305_init(block_state, k);
-  return p;
-}
-
-void
-Hacl_Streaming_Poly1305_32_init(uint8_t *k, Hacl_Streaming_Poly1305_32_poly1305_32_state *s)
-{
-  Hacl_Streaming_Poly1305_32_poly1305_32_state scrut = *s;
-  uint8_t *k_ = scrut.p_key;
-  uint8_t *buf = scrut.buf;
-  uint64_t *block_state = scrut.block_state;
-  Hacl_Poly1305_32_poly1305_init(block_state, k);
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
-  uint8_t *k_1 = k_;
-  Hacl_Streaming_Poly1305_32_poly1305_32_state
-  tmp =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_1 };
-  s[0U] = tmp;
-}
-
-/**
-0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Poly1305_32_update(
-  Hacl_Streaming_Poly1305_32_poly1305_32_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Poly1305_32_poly1305_32_state s = *p;
-  uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)16U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)16U);
-  }
-  if (len <= (uint32_t)16U - sz)
-  {
-    Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p;
-    uint64_t *block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)16U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_32_poly1305_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2,
-          .p_key = k_1
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p;
-    uint64_t *block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)16U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      Hacl_Poly1305_32_poly1305_update(block_state1, (uint32_t)16U, buf);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)16U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)16U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)16U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)16U;
-    uint32_t data1_len = n_blocks * (uint32_t)16U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_Poly1305_32_poly1305_update(block_state1, data1_len, data1);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_32_poly1305_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len,
-          .p_key = k_1
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)16U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p;
-    uint64_t *block_state10 = s1.block_state;
-    uint8_t *buf0 = s1.buf;
-    uint64_t total_len10 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)16U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)16U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_32_poly1305_32_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2,
-          .p_key = k_1
-        }
-      );
-    Hacl_Streaming_Poly1305_32_poly1305_32_state s10 = *p;
-    uint64_t *block_state1 = s10.block_state;
-    uint8_t *buf = s10.buf;
-    uint64_t total_len1 = s10.total_len;
-    uint8_t *k_10 = s10.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)16U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      Hacl_Poly1305_32_poly1305_update(block_state1, (uint32_t)16U, buf);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)16U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)16U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)16U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)16U;
-    uint32_t data1_len = n_blocks * (uint32_t)16U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_Poly1305_32_poly1305_update(block_state1, data1_len, data11);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_32_poly1305_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff),
-          .p_key = k_10
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-void
-Hacl_Streaming_Poly1305_32_finish(
-  Hacl_Streaming_Poly1305_32_poly1305_32_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Poly1305_32_poly1305_32_state scrut = *p;
-  uint64_t *block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint8_t *k_ = scrut.p_key;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)16U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)16U);
-  }
-  uint8_t *buf_1 = buf_;
-  uint64_t r1[25U] = { 0U };
-  uint64_t *tmp_block_state = r1;
-  memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (uint64_t));
-  uint32_t ite;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)16U;
-  }
-  else
-  {
-    ite = r % (uint32_t)16U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite;
-  uint8_t *buf_multi = buf_1;
-  Hacl_Poly1305_32_poly1305_update(tmp_block_state, (uint32_t)0U, buf_multi);
-  Hacl_Poly1305_32_poly1305_update(tmp_block_state, r, buf_last);
-  uint64_t tmp[25U] = { 0U };
-  memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (uint64_t));
-  Hacl_Poly1305_32_poly1305_finish(dst, k_, tmp);
-}
-
-void Hacl_Streaming_Poly1305_32_free(Hacl_Streaming_Poly1305_32_poly1305_32_state *s)
-{
-  Hacl_Streaming_Poly1305_32_poly1305_32_state scrut = *s;
-  uint8_t *k_ = scrut.p_key;
-  uint8_t *buf = scrut.buf;
-  uint64_t *block_state = scrut.block_state;
-  KRML_HOST_FREE(k_);
-  KRML_HOST_FREE(block_state);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
-}
-
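All three deleted wrappers ultimately drive the low-level Poly1305 functions visible above (init, update, finish) over the same context layout; for the scalar variant that is a 25-element uint64_t array. When the whole message is available at once, the tag can be computed directly, as in the sketch below; the include is an assumption about the pre-patch header layout, and the buffer contents are placeholders.

#include <stdint.h>
#include "Hacl_Poly1305_32.h"   /* pre-patch header providing the declarations used above (assumption) */

/* One-shot equivalent of the deleted streaming wrapper, using the functions it calls internally. */
static void poly1305_32_one_shot(uint8_t *tag16, uint8_t *msg, uint32_t msg_len, uint8_t *key32)
{
  uint64_t ctx[25U] = { 0U };                       /* same shape as block_state in the code above */
  Hacl_Poly1305_32_poly1305_init(ctx, key32);
  Hacl_Poly1305_32_poly1305_update(ctx, msg_len, msg);
  Hacl_Poly1305_32_poly1305_finish(tag16, key32, ctx);
}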
diff --git a/src/msvc/EverCrypt_AEAD.c b/src/msvc/EverCrypt_AEAD.c
index d3a4ffbe..b0fb4826 100644
--- a/src/msvc/EverCrypt_AEAD.c
+++ b/src/msvc/EverCrypt_AEAD.c
@@ -46,8 +46,8 @@ The state may be reused as many times as desired.
 */
 bool EverCrypt_AEAD_uu___is_Ek(Spec_Agile_AEAD_alg a, EverCrypt_AEAD_state_s projectee)
 {
-  KRML_HOST_IGNORE(a);
-  KRML_HOST_IGNORE(projectee);
+  KRML_MAYBE_UNUSED_VAR(a);
+  KRML_MAYBE_UNUSED_VAR(projectee);
   return true;
 }
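Both macros used in this file exist to mark values as intentionally unused so compilers do not warn about them. The definitions below are stand-ins for illustration only; the real KRML_HOST_IGNORE and KRML_MAYBE_UNUSED_VAR come from karamel's krmllib and may be defined differently.

#include <stdio.h>

#define DEMO_HOST_IGNORE(x)      ((void)(x))  /* evaluate an expression and discard its value          */
#define DEMO_MAYBE_UNUSED_VAR(x) ((void)(x))  /* reference a variable so it is not reported as unused  */

static int demo(int only_used_on_some_build_paths)
{
  DEMO_MAYBE_UNUSED_VAR(only_used_on_some_build_paths);
  DEMO_HOST_IGNORE(printf("side effects still execute\n"));
  return 0;
}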
 
@@ -86,11 +86,11 @@ Spec_Agile_AEAD_alg EverCrypt_AEAD_alg_of_state(EverCrypt_AEAD_state_s *s)
 static EverCrypt_Error_error_code
 create_in_chacha20_poly1305(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 {
-  uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+  uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
   EverCrypt_AEAD_state_s
   *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s));
   p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek });
-  memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(ek, k, 32U * sizeof (uint8_t));
   dst[0U] = p;
   return EverCrypt_Error_Success;
 }
@@ -98,8 +98,8 @@ create_in_chacha20_poly1305(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 static EverCrypt_Error_error_code
 create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 {
-  KRML_HOST_IGNORE(dst);
-  KRML_HOST_IGNORE(k);
+  KRML_MAYBE_UNUSED_VAR(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
   #if HACL_CAN_COMPILE_VALE
   bool has_aesni = EverCrypt_AutoConfig2_has_aesni();
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
@@ -108,11 +108,11 @@ create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
   bool has_movbe = EverCrypt_AutoConfig2_has_movbe();
   if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe)
   {
-    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)480U, sizeof (uint8_t));
+    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC(480U, sizeof (uint8_t));
     uint8_t *keys_b = ek;
-    uint8_t *hkeys_b = ek + (uint32_t)176U;
-    KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b));
-    KRML_HOST_IGNORE(aes128_keyhash_init(keys_b, hkeys_b));
+    uint8_t *hkeys_b = ek + 176U;
+    aes128_key_expansion(k, keys_b);
+    aes128_keyhash_init(keys_b, hkeys_b);
     EverCrypt_AEAD_state_s
     *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s));
     p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek });
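The 480-byte ek area allocated above is carved into fixed regions. The offsets come from this file (176 here, 304 in the encrypt path further down); the interpretation of each region is an educated guess rather than something documented in this source, and the macro and helper names below are hypothetical.

#include <stdint.h>

#define EK128_ROUND_KEYS_OFF  0U     /* 11 x 16-byte AES-128 round keys (176 bytes)                    */
#define EK128_HKEYS_OFF       176U   /* precomputed GHASH key material (128 bytes)                     */
#define EK128_SCRATCH_OFF     304U   /* inout block, AD tail block, then Vale scratch (176 bytes)      */
#define EK128_TOTAL_LEN       480U

static inline uint8_t *ek128_hkeys(uint8_t *ek)   { return ek + EK128_HKEYS_OFF; }
static inline uint8_t *ek128_scratch(uint8_t *ek) { return ek + EK128_SCRATCH_OFF; }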
@@ -128,8 +128,8 @@ create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 static EverCrypt_Error_error_code
 create_in_aes256_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
 {
-  KRML_HOST_IGNORE(dst);
-  KRML_HOST_IGNORE(k);
+  KRML_MAYBE_UNUSED_VAR(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
   #if HACL_CAN_COMPILE_VALE
   bool has_aesni = EverCrypt_AutoConfig2_has_aesni();
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
@@ -138,11 +138,11 @@ create_in_aes256_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k)
   bool has_movbe = EverCrypt_AutoConfig2_has_movbe();
   if (has_aesni && has_pclmulqdq && has_avx && has_sse && has_movbe)
   {
-    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)544U, sizeof (uint8_t));
+    uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC(544U, sizeof (uint8_t));
     uint8_t *keys_b = ek;
-    uint8_t *hkeys_b = ek + (uint32_t)240U;
-    KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b));
-    KRML_HOST_IGNORE(aes256_keyhash_init(keys_b, hkeys_b));
+    uint8_t *hkeys_b = ek + 240U;
+    aes256_key_expansion(k, keys_b);
+    aes256_keyhash_init(keys_b, hkeys_b);
     EverCrypt_AEAD_state_s
     *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s));
     p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek });
@@ -208,115 +208,106 @@ encrypt_aes128_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)304U;
+  uint8_t *scratch_b = ek + 304U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+  uint8_t *hkeys_b = ek1 + 176U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *plain_b_ = plain;
   uint8_t *out_b_ = cipher;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    plain + plain_len_,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+  memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+  if (len128x6 / 16ULL >= 18ULL)
   {
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    gcm128_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
-    KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
+    gcm128_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
-  memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
   return EverCrypt_Error_Success;
   #else
   KRML_HOST_EPRINTF("KaRaMeL abort at %s:%d\n%s\n",
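The length bookkeeping above recurs in every Vale GCM path in this file: the 6-way interleaved loop is taken only when the 96-byte-aligned prefix contains at least 18 full 16-byte blocks, the remaining whole blocks go through the single-block loop, and the final partial block travels through inout_b. A simplified model with hypothetical names; the real code passes len128x6 and len128 to the Vale routine divided by 16, i.e. as block counts.

#include <stdint.h>

typedef struct { uint64_t len128x6; uint64_t len128; uint32_t tail; } gcm_split_t;

/* Sketch of the plaintext split used by the Vale AES-GCM entry points (lengths in bytes). */
static gcm_split_t gcm_split(uint32_t plain_len)
{
  gcm_split_t s;
  uint64_t full = (uint64_t)plain_len / 16ULL * 16ULL;   /* whole 16-byte blocks, in bytes           */
  uint64_t x6   = (uint64_t)plain_len / 96ULL * 96ULL;   /* prefix eligible for the 6-block loop     */
  if (x6 / 16ULL >= 18ULL) { s.len128x6 = x6;   s.len128 = full - x6; }
  else                     { s.len128x6 = 0ULL; s.len128 = full; }
  s.tail = (uint32_t)(plain_len % 16U);                  /* partial block, staged through inout_b    */
  return s;
}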
@@ -340,115 +331,106 @@ encrypt_aes256_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)368U;
+  uint8_t *scratch_b = ek + 368U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+  uint8_t *hkeys_b = ek1 + 240U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *plain_b_ = plain;
   uint8_t *out_b_ = cipher;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    plain + plain_len_,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+  memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+  if (len128x6 / 16ULL >= 18ULL)
   {
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    gcm256_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = plain_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = plain_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
-    KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-        (uint64_t)ad_len,
-        auth_num,
-        keys_b,
-        tmp_iv,
-        hkeys_b,
-        abytes_b,
-        in128x6_b,
-        out128x6_b,
-        len128x6_,
-        in128_b,
-        out128_b,
-        len128_num_,
-        inout_b,
-        (uint64_t)plain_len,
-        scratch_b1,
-        tag));
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
+    gcm256_encrypt_opt(auth_b_,
+      (uint64_t)ad_len,
+      auth_num,
+      keys_b,
+      tmp_iv,
+      hkeys_b,
+      abytes_b,
+      in128x6_b,
+      out128x6_b,
+      len128x6_,
+      in128_b,
+      out128_b,
+      len128_num_,
+      inout_b,
+      (uint64_t)plain_len,
+      scratch_b1,
+      tag);
   }
-  memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
   return EverCrypt_Error_Success;
   #else
   KRML_HOST_EPRINTF("KaRaMeL abort at %s:%d\n%s\n",
@@ -510,7 +492,7 @@ EverCrypt_AEAD_encrypt(
       }
     case Spec_Cipher_Expansion_Hacl_CHACHA20:
       {
-        if (iv_len != (uint32_t)12U)
+        if (iv_len != 12U)
         {
           return EverCrypt_Error_InvalidIVLength;
         }
@@ -546,124 +528,115 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[480U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-  KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 176U;
+  aes128_key_expansion(k, keys_b0);
+  aes128_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
   }
-  else if (iv_len == (uint32_t)0U)
+  else if (iv_len == 0U)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
   }
   else
   {
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)304U;
+    uint8_t *scratch_b = ek0 + 304U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+    uint8_t *hkeys_b = ek1 + 176U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *plain_b_ = plain;
     uint8_t *out_b_ = cipher;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      plain + plain_len_,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+    memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+    if (len128x6 / 16ULL >= 18ULL)
     {
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      gcm128_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
-      KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
+      gcm128_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
-    memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
     KRML_HOST_IGNORE(EverCrypt_Error_Success);
   }
   return EverCrypt_Error_Success;
@@ -697,124 +670,115 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[544U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-  KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 240U;
+  aes256_key_expansion(k, keys_b0);
+  aes256_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
   }
-  else if (iv_len == (uint32_t)0U)
+  else if (iv_len == 0U)
   {
     KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
   }
   else
   {
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)368U;
+    uint8_t *scratch_b = ek0 + 368U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+    uint8_t *hkeys_b = ek1 + 240U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *plain_b_ = plain;
     uint8_t *out_b_ = cipher;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      plain + plain_len_,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+    memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+    if (len128x6 / 16ULL >= 18ULL)
     {
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      gcm256_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = plain_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = plain_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
-      KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-          (uint64_t)ad_len,
-          auth_num,
-          keys_b,
-          tmp_iv,
-          hkeys_b,
-          abytes_b,
-          in128x6_b,
-          out128x6_b,
-          len128x6_,
-          in128_b,
-          out128_b,
-          len128_num_,
-          inout_b,
-          (uint64_t)plain_len,
-          scratch_b1,
-          tag));
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
+      gcm256_encrypt_opt(auth_b_,
+        (uint64_t)ad_len,
+        auth_num,
+        keys_b,
+        tmp_iv,
+        hkeys_b,
+        abytes_b,
+        in128x6_b,
+        out128x6_b,
+        len128x6_,
+        in128_b,
+        out128_b,
+        len128_num_,
+        inout_b,
+        (uint64_t)plain_len,
+        scratch_b1,
+        tag);
     }
-    memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
     KRML_HOST_IGNORE(EverCrypt_Error_Success);
   }
   return EverCrypt_Error_Success;
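Reviewer note (not part of the generated patch): the length-splitting arithmetic touched throughout these Vale AES-GCM paths works the same way before and after the literal cleanup. The bulk of the input is processed six 16-byte blocks (96 bytes) at a time, the remaining whole blocks go through the single-block path, and the final partial block is staged in `inout_b` and copied back at the end. When the block-count guard fails (`len128x6 / 16 >= 18` on the encrypt paths, `>= 6` on the decrypt paths), `len128x6_` is forced to zero and the whole 16-byte-aligned prefix takes the single-block path. A minimal, self-contained sketch of that split for one example length (the length and variable names are illustrative only):

    /* Illustrative only: mirrors the len128x6 / len128_num / inout_b split above. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
      uint64_t plain_len = 1000ULL;                               /* example input length       */
      uint64_t len128x6  = plain_len / 96ULL * 96ULL;             /* 960: bytes done 6 blocks at a time */
      uint64_t len128    = plain_len / 16ULL * 16ULL - len128x6;  /* 32: remaining whole blocks  */
      uint64_t tail      = plain_len % 16ULL;                     /* 8: partial block via inout_b */
      printf("6-wide: %llu, single: %llu, tail: %llu\n",
             (unsigned long long)len128x6,
             (unsigned long long)len128,
             (unsigned long long)tail);
      return 0;
    }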
@@ -840,15 +804,15 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -859,112 +823,103 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm(
   {
     uint8_t ek[480U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-    KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 176U;
+    aes128_key_expansion(k, keys_b0);
+    aes128_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
     }
-    else if (iv_len == (uint32_t)0U)
+    else if (iv_len == 0U)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
     }
     else
     {
       uint8_t *ek0 = (*s).ek;
-      uint8_t *scratch_b = ek0 + (uint32_t)304U;
+      uint8_t *scratch_b = ek0 + 304U;
       uint8_t *ek1 = ek0;
       uint8_t *keys_b = ek1;
-      uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+      uint8_t *hkeys_b = ek1 + 176U;
       uint8_t tmp_iv[16U] = { 0U };
-      uint32_t len = iv_len / (uint32_t)16U;
-      uint32_t bytes_len = len * (uint32_t)16U;
+      uint32_t len = iv_len / 16U;
+      uint32_t bytes_len = len * 16U;
       uint8_t *iv_b = iv;
-      memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-      KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-          (uint64_t)iv_len,
-          (uint64_t)len,
-          tmp_iv,
-          tmp_iv,
-          hkeys_b));
+      memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+      compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
       uint8_t *inout_b = scratch_b;
-      uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-      uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+      uint8_t *abytes_b = scratch_b + 16U;
+      uint8_t *scratch_b1 = scratch_b + 32U;
+      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
       uint8_t *plain_b_ = plain;
       uint8_t *out_b_ = cipher;
       uint8_t *auth_b_ = ad;
-      memcpy(inout_b,
-        plain + plain_len_,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-      memcpy(abytes_b,
-        ad + auth_len_,
-        (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-      uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-      if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+      memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+      memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+      uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+      if (len128x6 / 16ULL >= 18ULL)
       {
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
         uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128x6_ = len128x6 / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        gcm128_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
       else
       {
-        uint32_t len128x61 = (uint32_t)0U;
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+        uint32_t len128x61 = 0U;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + len128x61;
         uint8_t *out128_b = out_b_ + len128x61;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        uint64_t len128x6_ = (uint64_t)0U;
-        KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        uint64_t len128x6_ = 0ULL;
+        gcm128_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
-      memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+      memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
         inout_b,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+        (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
       KRML_HOST_IGNORE(EverCrypt_Error_Success);
     }
     return EverCrypt_Error_Success;
@@ -988,15 +943,15 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(plain);
-  KRML_HOST_IGNORE(plain_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(tag);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(plain);
+  KRML_MAYBE_UNUSED_VAR(plain_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(tag);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -1007,112 +962,103 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm(
   {
     uint8_t ek[544U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-    KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 240U;
+    aes256_key_expansion(k, keys_b0);
+    aes256_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey);
     }
-    else if (iv_len == (uint32_t)0U)
+    else if (iv_len == 0U)
     {
       KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength);
     }
     else
     {
       uint8_t *ek0 = (*s).ek;
-      uint8_t *scratch_b = ek0 + (uint32_t)368U;
+      uint8_t *scratch_b = ek0 + 368U;
       uint8_t *ek1 = ek0;
       uint8_t *keys_b = ek1;
-      uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+      uint8_t *hkeys_b = ek1 + 240U;
       uint8_t tmp_iv[16U] = { 0U };
-      uint32_t len = iv_len / (uint32_t)16U;
-      uint32_t bytes_len = len * (uint32_t)16U;
+      uint32_t len = iv_len / 16U;
+      uint32_t bytes_len = len * 16U;
       uint8_t *iv_b = iv;
-      memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-      KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-          (uint64_t)iv_len,
-          (uint64_t)len,
-          tmp_iv,
-          tmp_iv,
-          hkeys_b));
+      memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+      compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
       uint8_t *inout_b = scratch_b;
-      uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-      uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U;
-      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+      uint8_t *abytes_b = scratch_b + 16U;
+      uint8_t *scratch_b1 = scratch_b + 32U;
+      uint32_t plain_len_ = (uint32_t)(uint64_t)plain_len / 16U * 16U;
+      uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
       uint8_t *plain_b_ = plain;
       uint8_t *out_b_ = cipher;
       uint8_t *auth_b_ = ad;
-      memcpy(inout_b,
-        plain + plain_len_,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
-      memcpy(abytes_b,
-        ad + auth_len_,
-        (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-      uint64_t len128x6 = (uint64_t)plain_len / (uint64_t)96U * (uint64_t)96U;
-      if (len128x6 / (uint64_t)16U >= (uint64_t)18U)
+      memcpy(inout_b, plain + plain_len_, (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
+      memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+      uint64_t len128x6 = (uint64_t)plain_len / 96ULL * 96ULL;
+      if (len128x6 / 16ULL >= 18ULL)
       {
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL - len128x6;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + (uint32_t)len128x6;
         uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128x6_ = len128x6 / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        gcm256_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
       else
       {
-        uint32_t len128x61 = (uint32_t)0U;
-        uint64_t len128_num = (uint64_t)plain_len / (uint64_t)16U * (uint64_t)16U;
+        uint32_t len128x61 = 0U;
+        uint64_t len128_num = (uint64_t)plain_len / 16ULL * 16ULL;
         uint8_t *in128x6_b = plain_b_;
         uint8_t *out128x6_b = out_b_;
         uint8_t *in128_b = plain_b_ + len128x61;
         uint8_t *out128_b = out_b_ + len128x61;
-        uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-        uint64_t len128_num_ = len128_num / (uint64_t)16U;
-        uint64_t len128x6_ = (uint64_t)0U;
-        KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_,
-            (uint64_t)ad_len,
-            auth_num,
-            keys_b,
-            tmp_iv,
-            hkeys_b,
-            abytes_b,
-            in128x6_b,
-            out128x6_b,
-            len128x6_,
-            in128_b,
-            out128_b,
-            len128_num_,
-            inout_b,
-            (uint64_t)plain_len,
-            scratch_b1,
-            tag));
+        uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+        uint64_t len128_num_ = len128_num / 16ULL;
+        uint64_t len128x6_ = 0ULL;
+        gcm256_encrypt_opt(auth_b_,
+          (uint64_t)ad_len,
+          auth_num,
+          keys_b,
+          tmp_iv,
+          hkeys_b,
+          abytes_b,
+          in128x6_b,
+          out128x6_b,
+          len128x6_,
+          in128_b,
+          out128_b,
+          len128_num_,
+          inout_b,
+          (uint64_t)plain_len,
+          scratch_b1,
+          tag);
       }
-      memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U,
+      memcpy(cipher + (uint32_t)(uint64_t)plain_len / 16U * 16U,
         inout_b,
-        (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t));
+        (uint32_t)(uint64_t)plain_len % 16U * sizeof (uint8_t));
       KRML_HOST_IGNORE(EverCrypt_Error_Success);
     }
     return EverCrypt_Error_Success;
@@ -1136,10 +1082,10 @@ EverCrypt_AEAD_encrypt_expand_chacha20_poly1305(
   uint8_t *tag
 )
 {
-  KRML_HOST_IGNORE(iv_len);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
   uint8_t ek[32U] = { 0U };
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek };
-  memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(ek, k, 32U * sizeof (uint8_t));
   EverCrypt_AEAD_state_s *s = &p;
   uint8_t *ek0 = (*s).ek;
   EverCrypt_Chacha20Poly1305_aead_encrypt(ek0, iv, ad_len, ad, plain_len, plain, cipher, tag);
@@ -1222,66 +1168,57 @@ decrypt_aes128_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)304U;
+  uint8_t *scratch_b = ek + 304U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+  uint8_t *hkeys_b = ek1 + 176U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1305,15 +1242,15 @@ decrypt_aes128_gcm(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1335,11 +1272,11 @@ decrypt_aes128_gcm(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1366,66 +1303,57 @@ decrypt_aes256_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(s);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(s);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
-  uint8_t *scratch_b = ek + (uint32_t)368U;
+  uint8_t *scratch_b = ek + 368U;
   uint8_t *ek1 = ek;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+  uint8_t *hkeys_b = ek1 + 240U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1449,15 +1377,15 @@ decrypt_aes256_gcm(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1479,11 +1407,11 @@ decrypt_aes256_gcm(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1514,14 +1442,14 @@ decrypt_chacha20_poly1305(
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len != (uint32_t)12U)
+  if (iv_len != 12U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek = (*s).ek;
   uint32_t
   r = EverCrypt_Chacha20Poly1305_aead_decrypt(ek, iv, ad_len, ad, cipher_len, dst, cipher, tag);
-  if (r == (uint32_t)0U)
+  if (r == 0U)
   {
     return EverCrypt_Error_Success;
   }
@@ -1620,73 +1548,64 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[480U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-  KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 176U;
+  aes128_key_expansion(k, keys_b0);
+  aes128_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek0 = (*s).ek;
-  uint8_t *scratch_b = ek0 + (uint32_t)304U;
+  uint8_t *scratch_b = ek0 + 304U;
   uint8_t *ek1 = ek0;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+  uint8_t *hkeys_b = ek1 + 176U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1710,15 +1629,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm128_decrypt_opt(auth_b_,
@@ -1740,11 +1659,11 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1779,73 +1698,64 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ek[544U] = { 0U };
   uint8_t *keys_b0 = ek;
-  uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-  KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-  KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+  uint8_t *hkeys_b0 = ek + 240U;
+  aes256_key_expansion(k, keys_b0);
+  aes256_keyhash_init(keys_b0, hkeys_b0);
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
   EverCrypt_AEAD_state_s *s = &p;
   if (s == NULL)
   {
     return EverCrypt_Error_InvalidKey;
   }
-  if (iv_len == (uint32_t)0U)
+  if (iv_len == 0U)
   {
     return EverCrypt_Error_InvalidIVLength;
   }
   uint8_t *ek0 = (*s).ek;
-  uint8_t *scratch_b = ek0 + (uint32_t)368U;
+  uint8_t *scratch_b = ek0 + 368U;
   uint8_t *ek1 = ek0;
   uint8_t *keys_b = ek1;
-  uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+  uint8_t *hkeys_b = ek1 + 240U;
   uint8_t tmp_iv[16U] = { 0U };
-  uint32_t len = iv_len / (uint32_t)16U;
-  uint32_t bytes_len = len * (uint32_t)16U;
+  uint32_t len = iv_len / 16U;
+  uint32_t bytes_len = len * 16U;
   uint8_t *iv_b = iv;
-  memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-      (uint64_t)iv_len,
-      (uint64_t)len,
-      tmp_iv,
-      tmp_iv,
-      hkeys_b));
+  memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+  compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
   uint8_t *inout_b = scratch_b;
-  uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-  uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+  uint8_t *abytes_b = scratch_b + 16U;
+  uint8_t *scratch_b1 = scratch_b + 32U;
+  uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+  uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
   uint8_t *cipher_b_ = cipher;
   uint8_t *out_b_ = dst;
   uint8_t *auth_b_ = ad;
-  memcpy(inout_b,
-    cipher + cipher_len_,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-  memcpy(abytes_b,
-    ad + auth_len_,
-    (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-  uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+  memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+  memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+  uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
   uint64_t c;
-  if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+  if (len128x6 / 16ULL >= 6ULL)
   {
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
     uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128x6_ = len128x6 / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1869,15 +1779,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check(
   }
   else
   {
-    uint32_t len128x61 = (uint32_t)0U;
-    uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+    uint32_t len128x61 = 0U;
+    uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
     uint8_t *in128x6_b = cipher_b_;
     uint8_t *out128x6_b = out_b_;
     uint8_t *in128_b = cipher_b_ + len128x61;
     uint8_t *out128_b = out_b_ + len128x61;
-    uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-    uint64_t len128_num_ = len128_num / (uint64_t)16U;
-    uint64_t len128x6_ = (uint64_t)0U;
+    uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+    uint64_t len128_num_ = len128_num / 16ULL;
+    uint64_t len128x6_ = 0ULL;
     uint64_t
     c0 =
       gcm256_decrypt_opt(auth_b_,
@@ -1899,11 +1809,11 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check(
         tag);
     c = c0;
   }
-  memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+  memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
     inout_b,
-    (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+    (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
   uint64_t r = c;
-  if (r == (uint64_t)0U)
+  if (r == 0ULL)
   {
     return EverCrypt_Error_Success;
   }
@@ -1930,15 +1840,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -1949,61 +1859,52 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
   {
     uint8_t ek[480U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)176U;
-    KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 176U;
+    aes128_key_expansion(k, keys_b0);
+    aes128_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       return EverCrypt_Error_InvalidKey;
     }
-    if (iv_len == (uint32_t)0U)
+    if (iv_len == 0U)
     {
       return EverCrypt_Error_InvalidIVLength;
     }
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)304U;
+    uint8_t *scratch_b = ek0 + 304U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)176U;
+    uint8_t *hkeys_b = ek1 + 176U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *cipher_b_ = cipher;
     uint8_t *out_b_ = dst;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      cipher + cipher_len_,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+    memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
     uint64_t c;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+    if (len128x6 / 16ULL >= 6ULL)
     {
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
       uint64_t
       c0 =
         gcm128_decrypt_opt(auth_b_,
@@ -2027,15 +1928,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
       uint64_t
       c0 =
         gcm128_decrypt_opt(auth_b_,
@@ -2057,11 +1958,11 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm(
           tag);
       c = c0;
     }
-    memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
     uint64_t r = c;
-    if (r == (uint64_t)0U)
+    if (r == 0ULL)
     {
       return EverCrypt_Error_Success;
     }
@@ -2086,15 +1987,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
   uint8_t *dst
 )
 {
-  KRML_HOST_IGNORE(k);
-  KRML_HOST_IGNORE(iv);
-  KRML_HOST_IGNORE(iv_len);
-  KRML_HOST_IGNORE(ad);
-  KRML_HOST_IGNORE(ad_len);
-  KRML_HOST_IGNORE(cipher);
-  KRML_HOST_IGNORE(cipher_len);
-  KRML_HOST_IGNORE(tag);
-  KRML_HOST_IGNORE(dst);
+  KRML_MAYBE_UNUSED_VAR(k);
+  KRML_MAYBE_UNUSED_VAR(iv);
+  KRML_MAYBE_UNUSED_VAR(iv_len);
+  KRML_MAYBE_UNUSED_VAR(ad);
+  KRML_MAYBE_UNUSED_VAR(ad_len);
+  KRML_MAYBE_UNUSED_VAR(cipher);
+  KRML_MAYBE_UNUSED_VAR(cipher_len);
+  KRML_MAYBE_UNUSED_VAR(tag);
+  KRML_MAYBE_UNUSED_VAR(dst);
   #if HACL_CAN_COMPILE_VALE
   bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq();
   bool has_avx = EverCrypt_AutoConfig2_has_avx();
@@ -2105,61 +2006,52 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
   {
     uint8_t ek[544U] = { 0U };
     uint8_t *keys_b0 = ek;
-    uint8_t *hkeys_b0 = ek + (uint32_t)240U;
-    KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0));
-    KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0));
+    uint8_t *hkeys_b0 = ek + 240U;
+    aes256_key_expansion(k, keys_b0);
+    aes256_keyhash_init(keys_b0, hkeys_b0);
     EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek };
     EverCrypt_AEAD_state_s *s = &p;
     if (s == NULL)
     {
       return EverCrypt_Error_InvalidKey;
     }
-    if (iv_len == (uint32_t)0U)
+    if (iv_len == 0U)
     {
       return EverCrypt_Error_InvalidIVLength;
     }
     uint8_t *ek0 = (*s).ek;
-    uint8_t *scratch_b = ek0 + (uint32_t)368U;
+    uint8_t *scratch_b = ek0 + 368U;
     uint8_t *ek1 = ek0;
     uint8_t *keys_b = ek1;
-    uint8_t *hkeys_b = ek1 + (uint32_t)240U;
+    uint8_t *hkeys_b = ek1 + 240U;
     uint8_t tmp_iv[16U] = { 0U };
-    uint32_t len = iv_len / (uint32_t)16U;
-    uint32_t bytes_len = len * (uint32_t)16U;
+    uint32_t len = iv_len / 16U;
+    uint32_t bytes_len = len * 16U;
     uint8_t *iv_b = iv;
-    memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(compute_iv_stdcall(iv_b,
-        (uint64_t)iv_len,
-        (uint64_t)len,
-        tmp_iv,
-        tmp_iv,
-        hkeys_b));
+    memcpy(tmp_iv, iv + bytes_len, iv_len % 16U * sizeof (uint8_t));
+    compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b);
     uint8_t *inout_b = scratch_b;
-    uint8_t *abytes_b = scratch_b + (uint32_t)16U;
-    uint8_t *scratch_b1 = scratch_b + (uint32_t)32U;
-    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U;
-    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / (uint32_t)16U * (uint32_t)16U;
+    uint8_t *abytes_b = scratch_b + 16U;
+    uint8_t *scratch_b1 = scratch_b + 32U;
+    uint32_t cipher_len_ = (uint32_t)(uint64_t)cipher_len / 16U * 16U;
+    uint32_t auth_len_ = (uint32_t)(uint64_t)ad_len / 16U * 16U;
     uint8_t *cipher_b_ = cipher;
     uint8_t *out_b_ = dst;
     uint8_t *auth_b_ = ad;
-    memcpy(inout_b,
-      cipher + cipher_len_,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
-    memcpy(abytes_b,
-      ad + auth_len_,
-      (uint32_t)(uint64_t)ad_len % (uint32_t)16U * sizeof (uint8_t));
-    uint64_t len128x6 = (uint64_t)cipher_len / (uint64_t)96U * (uint64_t)96U;
+    memcpy(inout_b, cipher + cipher_len_, (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
+    memcpy(abytes_b, ad + auth_len_, (uint32_t)(uint64_t)ad_len % 16U * sizeof (uint8_t));
+    uint64_t len128x6 = (uint64_t)cipher_len / 96ULL * 96ULL;
     uint64_t c;
-    if (len128x6 / (uint64_t)16U >= (uint64_t)6U)
+    if (len128x6 / 16ULL >= 6ULL)
     {
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U - len128x6;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL - len128x6;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + (uint32_t)len128x6;
       uint8_t *out128_b = out_b_ + (uint32_t)len128x6;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128x6_ = len128x6 / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128x6_ = len128x6 / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
       uint64_t
       c0 =
         gcm256_decrypt_opt(auth_b_,
@@ -2183,15 +2075,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
     }
     else
     {
-      uint32_t len128x61 = (uint32_t)0U;
-      uint64_t len128_num = (uint64_t)cipher_len / (uint64_t)16U * (uint64_t)16U;
+      uint32_t len128x61 = 0U;
+      uint64_t len128_num = (uint64_t)cipher_len / 16ULL * 16ULL;
       uint8_t *in128x6_b = cipher_b_;
       uint8_t *out128x6_b = out_b_;
       uint8_t *in128_b = cipher_b_ + len128x61;
       uint8_t *out128_b = out_b_ + len128x61;
-      uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U;
-      uint64_t len128_num_ = len128_num / (uint64_t)16U;
-      uint64_t len128x6_ = (uint64_t)0U;
+      uint64_t auth_num = (uint64_t)ad_len / 16ULL;
+      uint64_t len128_num_ = len128_num / 16ULL;
+      uint64_t len128x6_ = 0ULL;
       uint64_t
       c0 =
         gcm256_decrypt_opt(auth_b_,
@@ -2213,11 +2105,11 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm(
           tag);
       c = c0;
     }
-    memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U,
+    memcpy(dst + (uint32_t)(uint64_t)cipher_len / 16U * 16U,
       inout_b,
-      (uint32_t)(uint64_t)cipher_len % (uint32_t)16U * sizeof (uint8_t));
+      (uint32_t)(uint64_t)cipher_len % 16U * sizeof (uint8_t));
     uint64_t r = c;
-    if (r == (uint64_t)0U)
+    if (r == 0ULL)
     {
       return EverCrypt_Error_Success;
     }
@@ -2244,7 +2136,7 @@ EverCrypt_AEAD_decrypt_expand_chacha20_poly1305(
 {
   uint8_t ek[32U] = { 0U };
   EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek };
-  memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(ek, k, 32U * sizeof (uint8_t));
   EverCrypt_AEAD_state_s *s = &p;
   EverCrypt_Error_error_code
   r = decrypt_chacha20_poly1305(s, iv, iv_len, ad, ad_len, cipher, cipher_len, tag, dst);
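Reviewer note (not part of the generated patch): two mechanical patterns account for most of the churn in EverCrypt_AEAD.c — `(uint32_t)`/`(uint64_t)` casts on literals are replaced by plain `U`/`ULL` suffixes, and parameters that are only read when `HACL_CAN_COMPILE_VALE` is set are now marked with `KRML_MAYBE_UNUSED_VAR` instead of being passed through `KRML_HOST_IGNORE`. A hedged sketch of the second pattern; the function and its body are illustrative, and the macro is assumed to come from the krmllib headers these files already include:

    /* Illustrative only: silencing maybe-unused parameters on a feature-gated path. */
    #include <stdint.h>

    static uint64_t example_checksum(uint8_t *buf, uint32_t len)
    {
      KRML_MAYBE_UNUSED_VAR(buf);   /* only read when the Vale path is compiled in */
      KRML_MAYBE_UNUSED_VAR(len);
      #if HACL_CAN_COMPILE_VALE
      uint64_t acc = 0ULL;
      for (uint32_t i = 0U; i < len; i++) { acc += (uint64_t)buf[i]; }
      return acc;
      #else
      return 0ULL;
      #endif
    }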
diff --git a/src/msvc/EverCrypt_AutoConfig2.c b/src/msvc/EverCrypt_AutoConfig2.c
index b549d020..5a92d995 100644
--- a/src/msvc/EverCrypt_AutoConfig2.c
+++ b/src/msvc/EverCrypt_AutoConfig2.c
@@ -113,59 +113,59 @@ void EverCrypt_AutoConfig2_recall(void)
 void EverCrypt_AutoConfig2_init(void)
 {
   #if HACL_CAN_COMPILE_VALE
-  if (check_aesni() != (uint64_t)0U)
+  if (check_aesni() != 0ULL)
   {
     cpu_has_aesni[0U] = true;
     cpu_has_pclmulqdq[0U] = true;
   }
-  if (check_sha() != (uint64_t)0U)
+  if (check_sha() != 0ULL)
   {
     cpu_has_shaext[0U] = true;
   }
-  if (check_adx_bmi2() != (uint64_t)0U)
+  if (check_adx_bmi2() != 0ULL)
   {
     cpu_has_bmi2[0U] = true;
     cpu_has_adx[0U] = true;
   }
-  if (check_avx() != (uint64_t)0U)
+  if (check_avx() != 0ULL)
   {
-    if (check_osxsave() != (uint64_t)0U)
+    if (check_osxsave() != 0ULL)
     {
-      if (check_avx_xcr0() != (uint64_t)0U)
+      if (check_avx_xcr0() != 0ULL)
       {
         cpu_has_avx[0U] = true;
       }
     }
   }
-  if (check_avx2() != (uint64_t)0U)
+  if (check_avx2() != 0ULL)
   {
-    if (check_osxsave() != (uint64_t)0U)
+    if (check_osxsave() != 0ULL)
     {
-      if (check_avx_xcr0() != (uint64_t)0U)
+      if (check_avx_xcr0() != 0ULL)
       {
         cpu_has_avx2[0U] = true;
       }
     }
   }
-  if (check_sse() != (uint64_t)0U)
+  if (check_sse() != 0ULL)
   {
     cpu_has_sse[0U] = true;
   }
-  if (check_movbe() != (uint64_t)0U)
+  if (check_movbe() != 0ULL)
   {
     cpu_has_movbe[0U] = true;
   }
-  if (check_rdrand() != (uint64_t)0U)
+  if (check_rdrand() != 0ULL)
   {
     cpu_has_rdrand[0U] = true;
   }
-  if (check_avx512() != (uint64_t)0U)
+  if (check_avx512() != 0ULL)
   {
-    if (check_osxsave() != (uint64_t)0U)
+    if (check_osxsave() != 0ULL)
     {
-      if (check_avx_xcr0() != (uint64_t)0U)
+      if (check_avx_xcr0() != 0ULL)
       {
-        if (check_avx512_xcr0() != (uint64_t)0U)
+        if (check_avx512_xcr0() != 0ULL)
         {
           cpu_has_avx512[0U] = true;
           return;
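Reviewer note (not part of the generated patch): `EverCrypt_AutoConfig2_init` only sets a cached `cpu_has_*` flag when every corresponding check passes; the AVX/AVX2/AVX-512 flags additionally require the OSXSAVE and XCR0 checks, as the nesting above shows. A caller-side sketch using accessors that already appear in this patch (header name assumed from the source file name; the two accessors shown are only a subset of the checks the AEAD paths perform):

    #include <stdbool.h>
    #include <stdio.h>
    #include "EverCrypt_AutoConfig2.h"   /* assumed header for the accessors below */

    int main(void)
    {
      EverCrypt_AutoConfig2_init();      /* populate the cached feature flags */
      bool partial_gcm_ok = EverCrypt_AutoConfig2_has_pclmulqdq()
                            && EverCrypt_AutoConfig2_has_avx();
      printf("PCLMULQDQ+AVX available: %s\n", partial_gcm_ok ? "yes" : "no");
      return 0;
    }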
diff --git a/src/msvc/EverCrypt_Chacha20Poly1305.c b/src/msvc/EverCrypt_Chacha20Poly1305.c
index 9a110bbf..e762f031 100644
--- a/src/msvc/EverCrypt_Chacha20Poly1305.c
+++ b/src/msvc/EverCrypt_Chacha20Poly1305.c
@@ -44,22 +44,22 @@ EverCrypt_Chacha20Poly1305_aead_encrypt(
   #if HACL_CAN_COMPILE_VEC256
   if (vec256)
   {
-    KRML_HOST_IGNORE(vec128);
-    Hacl_Chacha20Poly1305_256_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
+    KRML_MAYBE_UNUSED_VAR(vec128);
+    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher, tag, m, mlen, aad, aadlen, k, n);
     return;
   }
   #endif
   #if HACL_CAN_COMPILE_VEC128
   if (vec128)
   {
-    KRML_HOST_IGNORE(vec256);
-    Hacl_Chacha20Poly1305_128_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
+    KRML_MAYBE_UNUSED_VAR(vec256);
+    Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher, tag, m, mlen, aad, aadlen, k, n);
     return;
   }
   #endif
-  KRML_HOST_IGNORE(vec128);
-  KRML_HOST_IGNORE(vec256);
-  Hacl_Chacha20Poly1305_32_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
+  KRML_MAYBE_UNUSED_VAR(vec128);
+  KRML_MAYBE_UNUSED_VAR(vec256);
+  Hacl_AEAD_Chacha20Poly1305_encrypt(cipher, tag, m, mlen, aad, aadlen, k, n);
 }
 
 uint32_t
@@ -79,19 +79,19 @@ EverCrypt_Chacha20Poly1305_aead_decrypt(
   #if HACL_CAN_COMPILE_VEC256
   if (vec256)
   {
-    KRML_HOST_IGNORE(vec128);
-    return Hacl_Chacha20Poly1305_256_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
+    KRML_MAYBE_UNUSED_VAR(vec128);
+    return Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(m, cipher, mlen, aad, aadlen, k, n, tag);
   }
   #endif
   #if HACL_CAN_COMPILE_VEC128
   if (vec128)
   {
-    KRML_HOST_IGNORE(vec256);
-    return Hacl_Chacha20Poly1305_128_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
+    KRML_MAYBE_UNUSED_VAR(vec256);
+    return Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(m, cipher, mlen, aad, aadlen, k, n, tag);
   }
   #endif
-  KRML_HOST_IGNORE(vec128);
-  KRML_HOST_IGNORE(vec256);
-  return Hacl_Chacha20Poly1305_32_aead_decrypt(k, n, aadlen, aad, mlen, m, cipher, tag);
+  KRML_MAYBE_UNUSED_VAR(vec128);
+  KRML_MAYBE_UNUSED_VAR(vec256);
+  return Hacl_AEAD_Chacha20Poly1305_decrypt(m, cipher, mlen, aad, aadlen, k, n, tag);
 }
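Reviewer note (not part of the generated patch): the two hunks above switch from the old per-width entry points to the renamed `Hacl_AEAD_Chacha20Poly1305*` functions, which also reorder the arguments from (key, nonce, aad..., msg..., out, tag) to (out, tag, msg..., aad..., key, nonce). A hedged before/after sketch for a caller of the scalar variant; buffer names are hypothetical, lengths are `uint32_t` as in the calls above, and the header name is assumed from the new function prefix:

    #include <stdint.h>
    #include "Hacl_AEAD_Chacha20Poly1305.h"   /* assumed header for the renamed API */

    /* Illustrative wrapper: encrypt in place, then decrypt and report the result. */
    static uint32_t seal_then_open(uint8_t *cipher, uint8_t *tag,
                                   uint8_t *m, uint32_t mlen,
                                   uint8_t *aad, uint32_t aadlen,
                                   uint8_t *k, uint8_t *n)
    {
      /* Old call (removed): Hacl_Chacha20Poly1305_32_aead_encrypt(k, n, aadlen, aad, mlen, m, cipher, tag); */
      Hacl_AEAD_Chacha20Poly1305_encrypt(cipher, tag, m, mlen, aad, aadlen, k, n);
      /* Returns 0U when the tag verifies, mirroring the r == 0U check in decrypt_chacha20_poly1305 above. */
      return Hacl_AEAD_Chacha20Poly1305_decrypt(m, cipher, mlen, aad, aadlen, k, n, tag);
    }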
 
diff --git a/src/msvc/EverCrypt_DRBG.c b/src/msvc/EverCrypt_DRBG.c
index 9591823c..1395f59f 100644
--- a/src/msvc/EverCrypt_DRBG.c
+++ b/src/msvc/EverCrypt_DRBG.c
@@ -28,15 +28,15 @@
 #include "internal/EverCrypt_HMAC.h"
 #include "lib_memzero0.h"
 
-uint32_t EverCrypt_DRBG_reseed_interval = (uint32_t)1024U;
+uint32_t EverCrypt_DRBG_reseed_interval = 1024U;
 
-uint32_t EverCrypt_DRBG_max_output_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_output_length = 65536U;
 
-uint32_t EverCrypt_DRBG_max_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_length = 65536U;
 
-uint32_t EverCrypt_DRBG_max_personalization_string_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_personalization_string_length = 65536U;
 
-uint32_t EverCrypt_DRBG_max_additional_input_length = (uint32_t)65536U;
+uint32_t EverCrypt_DRBG_max_additional_input_length = 65536U;
 
 uint32_t EverCrypt_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
 {
@@ -44,19 +44,19 @@ uint32_t EverCrypt_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     default:
       {
@@ -92,7 +92,7 @@ EverCrypt_DRBG_uu___is_SHA1_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA1_s)
   {
     return true;
@@ -106,7 +106,7 @@ EverCrypt_DRBG_uu___is_SHA2_256_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA2_256_s)
   {
     return true;
@@ -120,7 +120,7 @@ EverCrypt_DRBG_uu___is_SHA2_384_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA2_384_s)
   {
     return true;
@@ -134,7 +134,7 @@ EverCrypt_DRBG_uu___is_SHA2_512_s(
   EverCrypt_DRBG_state_s projectee
 )
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   if (projectee.tag == SHA2_512_s)
   {
     return true;
@@ -149,10 +149,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -164,10 +164,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -179,10 +179,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -194,10 +194,10 @@ EverCrypt_DRBG_state_s *EverCrypt_DRBG_create_in(Spec_Hash_Definitions_hash_alg
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+        uint8_t *k = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+        uint8_t *v = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
         uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         st =
           (
             (EverCrypt_DRBG_state_s){
@@ -247,7 +247,7 @@ instantiate_sha1(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA1) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t *entropy = (uint8_t *)alloca(min_entropy * sizeof (uint8_t));
@@ -285,45 +285,43 @@ instantiate_sha1(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)20U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)20U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 20U * sizeof (uint8_t));
+  memset(v, 1U, 20U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 21U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 20U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)21U,
+    memcpy(input0 + 21U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[20U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-  EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-  memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[20U] = 0U;
+  EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+  EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+  memcpy(k, k_, 20U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 21U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 20U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)21U,
+      memcpy(input + 21U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[20U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-    EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+    input[20U] = 1U;
+    EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+    EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+    memcpy(k, k_0, 20U * sizeof (uint8_t));
   }
   return true;
 }
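/*
 * Illustrative sketch, not part of the patch: the instantiate_sha1 hunk above
 * inlines the HMAC-DRBG update step (K = HMAC(K, V || 0x00 || data),
 * V = HMAC(K, V), then a second round with 0x01 when data is non-empty).
 * The hypothetical helper below writes that step out once for SHA-1, assuming
 * only the EverCrypt_HMAC_compute_sha1(dst, key, key_len, data, data_len)
 * prototype from EverCrypt_HMAC.h that the generated code already calls;
 * 20U is the SHA-1 digest length.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "EverCrypt_HMAC.h"

static void
drbg_update_sha1(uint8_t *k, uint8_t *v, const uint8_t *data, uint32_t data_len)
{
  uint8_t new_k[20U];
  uint32_t input_len = 21U + data_len;
  uint8_t *input = (uint8_t *)calloc(input_len, sizeof (uint8_t));
  if (input == NULL)
  {
    return;
  }
  /* Round 1: input = V || 0x00 || data, then K = HMAC(K, input), V = HMAC(K, V). */
  memcpy(input, v, 20U * sizeof (uint8_t));
  input[20U] = 0U;
  if (data_len != 0U)
  {
    memcpy(input + 21U, data, data_len * sizeof (uint8_t));
  }
  EverCrypt_HMAC_compute_sha1(new_k, k, 20U, input, input_len);
  /* dst aliases the data argument here, exactly as in the generated code. */
  EverCrypt_HMAC_compute_sha1(v, new_k, 20U, v, 20U);
  memcpy(k, new_k, 20U * sizeof (uint8_t));
  /* Round 2 runs only when data is non-empty, with the 0x00 byte replaced by
     0x01 (data is still in place in input from round 1). */
  if (data_len != 0U)
  {
    memcpy(input, v, 20U * sizeof (uint8_t));
    input[20U] = 1U;
    EverCrypt_HMAC_compute_sha1(new_k, k, 20U, input, input_len);
    EverCrypt_HMAC_compute_sha1(v, new_k, 20U, v, 20U);
    memcpy(k, new_k, 20U * sizeof (uint8_t));
  }
  free(input);
}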
@@ -340,7 +338,7 @@ instantiate_sha2_256(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t *entropy = (uint8_t *)alloca(min_entropy * sizeof (uint8_t));
@@ -378,45 +376,43 @@ instantiate_sha2_256(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)32U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)32U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 32U * sizeof (uint8_t));
+  memset(v, 1U, 32U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 33U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 32U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)33U,
+    memcpy(input0 + 33U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[32U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-  memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[32U] = 0U;
+  EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+  memcpy(k, k_, 32U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 33U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 32U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)33U,
+      memcpy(input + 33U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[32U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+    input[32U] = 1U;
+    EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+    memcpy(k, k_0, 32U * sizeof (uint8_t));
   }
   return true;
 }
@@ -433,7 +429,7 @@ instantiate_sha2_384(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_384) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t *entropy = (uint8_t *)alloca(min_entropy * sizeof (uint8_t));
@@ -471,45 +467,43 @@ instantiate_sha2_384(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)48U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)48U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 48U * sizeof (uint8_t));
+  memset(v, 1U, 48U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 49U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 48U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)49U,
+    memcpy(input0 + 49U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[48U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-  memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[48U] = 0U;
+  EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+  memcpy(k, k_, 48U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 49U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 48U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)49U,
+      memcpy(input + 49U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[48U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+    input[48U] = 1U;
+    EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+    memcpy(k, k_0, 48U * sizeof (uint8_t));
   }
   return true;
 }
@@ -526,7 +520,7 @@ instantiate_sha2_512(
     return false;
   }
   uint32_t entropy_input_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512);
-  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512) / (uint32_t)2U;
+  uint32_t nonce_len = Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_512) / 2U;
   uint32_t min_entropy = entropy_input_len + nonce_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), min_entropy);
   uint8_t *entropy = (uint8_t *)alloca(min_entropy * sizeof (uint8_t));
@@ -564,45 +558,43 @@ instantiate_sha2_512(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  memset(k, 0U, (uint32_t)64U * sizeof (uint8_t));
-  memset(v, (uint8_t)1U, (uint32_t)64U * sizeof (uint8_t));
-  ctr[0U] = (uint32_t)1U;
-  uint32_t
-  input_len = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+  memset(k, 0U, 64U * sizeof (uint8_t));
+  memset(v, 1U, 64U * sizeof (uint8_t));
+  ctr[0U] = 1U;
+  uint32_t input_len = 65U + entropy_input_len + nonce_len + personalization_string_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  memcpy(k_, v, 64U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    memcpy(input0 + (uint32_t)65U,
+    memcpy(input0 + 65U,
       seed_material,
       (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
   }
-  input0[64U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-  memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+  input0[64U] = 0U;
+  EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+  memcpy(k, k_, 64U * sizeof (uint8_t));
+  if (entropy_input_len + nonce_len + personalization_string_len != 0U)
   {
-    uint32_t
-    input_len0 = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+    uint32_t input_len0 = 65U + entropy_input_len + nonce_len + personalization_string_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-    if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+    memcpy(k_0, v, 64U * sizeof (uint8_t));
+    if (entropy_input_len + nonce_len + personalization_string_len != 0U)
     {
-      memcpy(input + (uint32_t)65U,
+      memcpy(input + 65U,
         seed_material,
         (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
     }
-    input[64U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+    input[64U] = 1U;
+    EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+    memcpy(k, k_0, 64U * sizeof (uint8_t));
   }
   return true;
 }
@@ -649,42 +641,42 @@ reseed_sha1(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 21U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 20U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)21U,
+    memcpy(input0 + 21U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[20U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-  EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-  memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[20U] = 0U;
+  EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+  EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+  memcpy(k, k_, 20U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)21U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 21U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 20U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)21U,
+      memcpy(input + 21U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[20U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-    EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+    input[20U] = 1U;
+    EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+    EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+    memcpy(k, k_0, 20U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
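/*
 * Illustrative sketch, not part of the patch: reseed_sha1 above applies the
 * same update step to seed_material (entropy_input || additional_input, built
 * earlier in the function) and then resets the reseed counter to 1U, in
 * contrast with the generate_* functions, which increment it. Assuming the
 * hypothetical drbg_update_sha1 helper sketched after instantiate_sha1 is in
 * scope:
 */
static void
drbg_reseed_sha1(uint8_t *k, uint8_t *v, uint32_t *ctr,
  const uint8_t *seed_material, uint32_t seed_material_len)
{
  /* seed_material stands for entropy_input || additional_input. */
  drbg_update_sha1(k, v, seed_material, seed_material_len);
  ctr[0U] = 1U; /* reseeding resets the counter; generation increments it */
}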
 
@@ -730,42 +722,42 @@ reseed_sha2_256(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 33U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 32U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)33U,
+    memcpy(input0 + 33U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[32U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-  memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[32U] = 0U;
+  EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+  memcpy(k, k_, 32U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)33U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 33U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 32U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)33U,
+      memcpy(input + 33U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[32U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+    input[32U] = 1U;
+    EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+    memcpy(k, k_0, 32U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -811,42 +803,42 @@ reseed_sha2_384(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 49U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 48U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)49U,
+    memcpy(input0 + 49U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[48U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-  memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[48U] = 0U;
+  EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+  memcpy(k, k_, 48U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)49U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 49U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 48U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)49U,
+      memcpy(input + 49U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[48U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+    input[48U] = 1U;
+    EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+    memcpy(k, k_0, 48U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -892,42 +884,42 @@ reseed_sha2_512(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_len;
+  uint32_t input_len = 65U + entropy_input_len + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 64U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)65U,
+    memcpy(input0 + 65U,
       seed_material,
       (entropy_input_len + additional_input_len) * sizeof (uint8_t));
   }
-  input0[64U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-  memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-  if (entropy_input_len + additional_input_len != (uint32_t)0U)
+  input0[64U] = 0U;
+  EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+  memcpy(k, k_, 64U * sizeof (uint8_t));
+  if (entropy_input_len + additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)65U + entropy_input_len + additional_input_len;
+    uint32_t input_len0 = 65U + entropy_input_len + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-    if (entropy_input_len + additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 64U * sizeof (uint8_t));
+    if (entropy_input_len + additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)65U,
+      memcpy(input + 65U,
         seed_material,
         (entropy_input_len + additional_input_len) * sizeof (uint8_t));
     }
-    input[64U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+    input[64U] = 1U;
+    EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+    memcpy(k, k_0, 64U * sizeof (uint8_t));
   }
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return true;
 }
 
@@ -992,42 +984,42 @@ generate_sha1(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 21U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 20U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)21U,
+        memcpy(input0 + 21U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[20U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-      EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-      memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[20U] = 0U;
+      EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+      EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+      memcpy(k, k_, 20U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)21U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 21U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 20U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)21U,
+          memcpy(input + 21U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[20U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-        EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+        input[20U] = 1U;
+        EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+        EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+        memcpy(k, k_0, 20U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1037,16 +1029,16 @@ generate_sha1(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA1_s)
   {
-    x1 = st_s.case_SHA1_s;
+    ite = st_s.case_SHA1_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1062,87 +1054,87 @@ generate_sha1(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)21U + additional_input_len;
+    uint32_t input_len = 21U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 20U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[20U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-    EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[20U] = 0U;
+    EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+    EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+    memcpy(k, k_, 20U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+      uint32_t input_len0 = 21U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 20U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[20U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-      EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-      memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+      input[20U] = 1U;
+      EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+      EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+      memcpy(k, k_0, 20U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)20U;
+  uint32_t max = n / 20U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(out + i * (uint32_t)20U, v, (uint32_t)20U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha1(v, k, 20U, v, 20U);
+    memcpy(out + i * 20U, v, 20U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)20U < n)
+  if (max * 20U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)20U;
-    EverCrypt_HMAC_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(block, v, (n - max * (uint32_t)20U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 20U;
+    EverCrypt_HMAC_compute_sha1(v, k, 20U, v, 20U);
+    memcpy(block, v, (n - max * 20U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)21U + additional_input_len;
+  uint32_t input_len = 21U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 20U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[20U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-  EverCrypt_HMAC_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-  memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[20U] = 0U;
+  EverCrypt_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+  EverCrypt_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+  memcpy(k, k_, 20U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+    uint32_t input_len0 = 21U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 20U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[20U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-    EverCrypt_HMAC_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-    memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+    input[20U] = 1U;
+    EverCrypt_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+    EverCrypt_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+    memcpy(k, k_0, 20U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
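/*
 * Illustrative sketch, not part of the patch: the output phase of the
 * generate_* functions above expands the DRBG state into n bytes by iterating
 * V = HMAC(K, V) and copying whole digests, then truncating a final digest
 * when n is not a multiple of the digest length. A hypothetical stand-alone
 * version for SHA-1 (digest length 20U), using the same
 * EverCrypt_HMAC_compute_sha1 call shape as the generated code:
 */
#include <stdint.h>
#include <string.h>
#include "EverCrypt_HMAC.h"

static void
drbg_output_sha1(uint8_t *k, uint8_t *v, uint8_t *output, uint32_t n)
{
  uint32_t max = n / 20U;
  /* Whole 20-byte blocks. */
  for (uint32_t i = 0U; i < max; i++)
  {
    EverCrypt_HMAC_compute_sha1(v, k, 20U, v, 20U);
    memcpy(output + i * 20U, v, 20U * sizeof (uint8_t));
  }
  /* Trailing partial block. */
  if (max * 20U < n)
  {
    EverCrypt_HMAC_compute_sha1(v, k, 20U, v, 20U);
    memcpy(output + max * 20U, v, (n - max * 20U) * sizeof (uint8_t));
  }
  /* The generated generate_sha1 then re-runs the update step on
     additional_input and increments the reseed counter (ctr[0U]). */
}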
 
@@ -1207,42 +1199,42 @@ generate_sha2_256(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 33U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 32U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)33U,
+        memcpy(input0 + 33U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[32U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-      EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-      memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[32U] = 0U;
+      EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+      EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+      memcpy(k, k_, 32U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)33U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 33U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 32U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)33U,
+          memcpy(input + 33U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[32U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-        EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+        input[32U] = 1U;
+        EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+        EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+        memcpy(k, k_0, 32U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1252,16 +1244,16 @@ generate_sha2_256(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA2_256_s)
   {
-    x1 = st_s.case_SHA2_256_s;
+    ite = st_s.case_SHA2_256_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1277,87 +1269,87 @@ generate_sha2_256(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)33U + additional_input_len;
+    uint32_t input_len = 33U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 32U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[32U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-    EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[32U] = 0U;
+    EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+    EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+    memcpy(k, k_, 32U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+      uint32_t input_len0 = 33U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 32U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[32U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-      EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-      memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+      input[32U] = 1U;
+      EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+      EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+      memcpy(k, k_0, 32U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)32U;
+  uint32_t max = n / 32U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(out + i * (uint32_t)32U, v, (uint32_t)32U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+    memcpy(out + i * 32U, v, 32U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)32U < n)
+  if (max * 32U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)32U;
-    EverCrypt_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(block, v, (n - max * (uint32_t)32U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 32U;
+    EverCrypt_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+    memcpy(block, v, (n - max * 32U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)33U + additional_input_len;
+  uint32_t input_len = 33U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 32U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[32U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-  memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[32U] = 0U;
+  EverCrypt_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+  memcpy(k, k_, 32U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+    uint32_t input_len0 = 33U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 32U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[32U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-    memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+    input[32U] = 1U;
+    EverCrypt_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+    memcpy(k, k_0, 32U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
 
@@ -1422,42 +1414,42 @@ generate_sha2_384(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 49U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 48U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)49U,
+        memcpy(input0 + 49U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[48U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-      EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-      memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[48U] = 0U;
+      EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+      EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+      memcpy(k, k_, 48U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)49U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 49U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 48U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)49U,
+          memcpy(input + 49U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[48U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-        EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+        input[48U] = 1U;
+        EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+        EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+        memcpy(k, k_0, 48U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1467,16 +1459,16 @@ generate_sha2_384(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA2_384_s)
   {
-    x1 = st_s.case_SHA2_384_s;
+    ite = st_s.case_SHA2_384_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1492,87 +1484,87 @@ generate_sha2_384(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)49U + additional_input_len;
+    uint32_t input_len = 49U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 48U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[48U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-    EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[48U] = 0U;
+    EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+    EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+    memcpy(k, k_, 48U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+      uint32_t input_len0 = 49U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 48U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[48U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-      EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-      memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+      input[48U] = 1U;
+      EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+      EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+      memcpy(k, k_0, 48U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)48U;
+  uint32_t max = n / 48U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(out + i * (uint32_t)48U, v, (uint32_t)48U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+    memcpy(out + i * 48U, v, 48U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)48U < n)
+  if (max * 48U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)48U;
-    EverCrypt_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(block, v, (n - max * (uint32_t)48U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 48U;
+    EverCrypt_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+    memcpy(block, v, (n - max * 48U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)49U + additional_input_len;
+  uint32_t input_len = 49U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 48U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[48U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-  memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[48U] = 0U;
+  EverCrypt_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+  memcpy(k, k_, 48U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+    uint32_t input_len0 = 49U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 48U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[48U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-    memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+    input[48U] = 1U;
+    EverCrypt_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+    memcpy(k, k_0, 48U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
 
@@ -1637,42 +1629,42 @@ generate_sha2_512(
       uint8_t *k = scrut.k;
       uint8_t *v = scrut.v;
       uint32_t *ctr = scrut.reseed_counter;
-      uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_len;
+      uint32_t input_len = 65U + entropy_input_len + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
       uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
       memset(input0, 0U, input_len * sizeof (uint8_t));
       uint8_t *k_ = input0;
-      memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      memcpy(k_, v, 64U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        memcpy(input0 + (uint32_t)65U,
+        memcpy(input0 + 65U,
           seed_material,
           (entropy_input_len + additional_input_len) * sizeof (uint8_t));
       }
-      input0[64U] = (uint8_t)0U;
-      EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-      EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-      memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-      if (entropy_input_len + additional_input_len != (uint32_t)0U)
+      input0[64U] = 0U;
+      EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+      EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+      memcpy(k, k_, 64U * sizeof (uint8_t));
+      if (entropy_input_len + additional_input_len != 0U)
       {
-        uint32_t input_len0 = (uint32_t)65U + entropy_input_len + additional_input_len;
+        uint32_t input_len0 = 65U + entropy_input_len + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
         uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
         memset(input, 0U, input_len0 * sizeof (uint8_t));
         uint8_t *k_0 = input;
-        memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_len != (uint32_t)0U)
+        memcpy(k_0, v, 64U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_len != 0U)
         {
-          memcpy(input + (uint32_t)65U,
+          memcpy(input + 65U,
             seed_material,
             (entropy_input_len + additional_input_len) * sizeof (uint8_t));
         }
-        input[64U] = (uint8_t)1U;
-        EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-        EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+        input[64U] = 1U;
+        EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+        EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+        memcpy(k, k_0, 64U * sizeof (uint8_t));
       }
-      ctr[0U] = (uint32_t)1U;
+      ctr[0U] = 1U;
       result = true;
     }
     ok0 = result;
@@ -1682,16 +1674,16 @@ generate_sha2_512(
     return false;
   }
   EverCrypt_DRBG_state_s st_s = *st;
-  Hacl_HMAC_DRBG_state x1;
+  Hacl_HMAC_DRBG_state ite;
   if (st_s.tag == SHA2_512_s)
   {
-    x1 = st_s.case_SHA2_512_s;
+    ite = st_s.case_SHA2_512_s;
   }
   else
   {
-    x1 = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
+    ite = KRML_EABORT(Hacl_HMAC_DRBG_state, "unreachable (pattern matches are exhaustive in F*)");
   }
-  if (x1.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
+  if (ite.reseed_counter[0U] > Hacl_HMAC_DRBG_reseed_interval)
   {
     return false;
   }
@@ -1707,87 +1699,87 @@ generate_sha2_512(
   uint8_t *k = scrut.k;
   uint8_t *v = scrut.v;
   uint32_t *ctr = scrut.reseed_counter;
-  if (additional_input_len > (uint32_t)0U)
+  if (additional_input_len > 0U)
   {
-    uint32_t input_len = (uint32_t)65U + additional_input_len;
+    uint32_t input_len = 65U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
     uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
     memset(input0, 0U, input_len * sizeof (uint8_t));
     uint8_t *k_ = input0;
-    memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_, v, 64U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input0 + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input0[64U] = (uint8_t)0U;
-    EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-    EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    input0[64U] = 0U;
+    EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+    EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+    memcpy(k, k_, 64U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+      uint32_t input_len0 = 65U + additional_input_len;
       KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
       uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
       memset(input, 0U, input_len0 * sizeof (uint8_t));
       uint8_t *k_0 = input;
-      memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-      if (additional_input_len != (uint32_t)0U)
+      memcpy(k_0, v, 64U * sizeof (uint8_t));
+      if (additional_input_len != 0U)
       {
-        memcpy(input + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+        memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
       }
-      input[64U] = (uint8_t)1U;
-      EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-      EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-      memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+      input[64U] = 1U;
+      EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+      EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+      memcpy(k, k_0, 64U * sizeof (uint8_t));
     }
   }
   uint8_t *output1 = output;
-  uint32_t max = n / (uint32_t)64U;
+  uint32_t max = n / 64U;
   uint8_t *out = output1;
-  for (uint32_t i = (uint32_t)0U; i < max; i++)
+  for (uint32_t i = 0U; i < max; i++)
   {
-    EverCrypt_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(out + i * (uint32_t)64U, v, (uint32_t)64U * sizeof (uint8_t));
+    EverCrypt_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+    memcpy(out + i * 64U, v, 64U * sizeof (uint8_t));
   }
-  if (max * (uint32_t)64U < n)
+  if (max * 64U < n)
   {
-    uint8_t *block = output1 + max * (uint32_t)64U;
-    EverCrypt_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(block, v, (n - max * (uint32_t)64U) * sizeof (uint8_t));
+    uint8_t *block = output1 + max * 64U;
+    EverCrypt_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+    memcpy(block, v, (n - max * 64U) * sizeof (uint8_t));
   }
-  uint32_t input_len = (uint32_t)65U + additional_input_len;
+  uint32_t input_len = 65U + additional_input_len;
   KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
   uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
   memset(input0, 0U, input_len * sizeof (uint8_t));
   uint8_t *k_ = input0;
-  memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  memcpy(k_, v, 64U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    memcpy(input0 + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+    memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
   }
-  input0[64U] = (uint8_t)0U;
-  EverCrypt_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-  EverCrypt_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-  memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-  if (additional_input_len != (uint32_t)0U)
+  input0[64U] = 0U;
+  EverCrypt_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+  EverCrypt_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+  memcpy(k, k_, 64U * sizeof (uint8_t));
+  if (additional_input_len != 0U)
   {
-    uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+    uint32_t input_len0 = 65U + additional_input_len;
     KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
     uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
     memset(input, 0U, input_len0 * sizeof (uint8_t));
     uint8_t *k_0 = input;
-    memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-    if (additional_input_len != (uint32_t)0U)
+    memcpy(k_0, v, 64U * sizeof (uint8_t));
+    if (additional_input_len != 0U)
     {
-      memcpy(input + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+      memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
     }
-    input[64U] = (uint8_t)1U;
-    EverCrypt_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-    EverCrypt_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-    memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+    input[64U] = 1U;
+    EverCrypt_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+    EverCrypt_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+    memcpy(k, k_0, 64U * sizeof (uint8_t));
   }
   uint32_t old_ctr = ctr[0U];
-  ctr[0U] = old_ctr + (uint32_t)1U;
+  ctr[0U] = old_ctr + 1U;
   return true;
 }
 
@@ -1806,9 +1798,9 @@ static void uninstantiate_sha1(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)20U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)20U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 20U, uint8_t);
+  Lib_Memzero0_memzero(v, 20U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
@@ -1830,9 +1822,9 @@ static void uninstantiate_sha2_256(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)32U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 32U, uint8_t);
+  Lib_Memzero0_memzero(v, 32U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
@@ -1854,9 +1846,9 @@ static void uninstantiate_sha2_384(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)48U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)48U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 48U, uint8_t);
+  Lib_Memzero0_memzero(v, 48U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
@@ -1878,9 +1870,9 @@ static void uninstantiate_sha2_512(EverCrypt_DRBG_state_s *st)
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
-  Lib_Memzero0_memzero(k, (uint32_t)64U, uint8_t);
-  Lib_Memzero0_memzero(v, (uint32_t)64U, uint8_t);
-  ctr[0U] = (uint32_t)0U;
+  Lib_Memzero0_memzero(k, 64U, uint8_t);
+  Lib_Memzero0_memzero(v, 64U, uint8_t);
+  ctr[0U] = 0U;
   KRML_HOST_FREE(k);
   KRML_HOST_FREE(v);
   KRML_HOST_FREE(ctr);
diff --git a/src/msvc/EverCrypt_HKDF.c b/src/msvc/EverCrypt_HKDF.c
index a802095d..cbccb94f 100644
--- a/src/msvc/EverCrypt_HKDF.c
+++ b/src/msvc/EverCrypt_HKDF.c
@@ -37,39 +37,39 @@ expand_sha1(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)20U;
+  uint32_t tlen = 20U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha1(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -92,39 +92,39 @@ expand_sha2_256(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -147,39 +147,39 @@ expand_sha2_384(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)48U;
+  uint32_t tlen = 48U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -202,39 +202,39 @@ expand_sha2_512(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -257,39 +257,39 @@ expand_blake2s(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2s(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -312,39 +312,39 @@ expand_blake2b(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      EverCrypt_HMAC_compute_blake2b(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/msvc/EverCrypt_HMAC.c b/src/msvc/EverCrypt_HMAC.c
index f279dfb8..386cb17f 100644
--- a/src/msvc/EverCrypt_HMAC.c
+++ b/src/msvc/EverCrypt_HMAC.c
@@ -28,7 +28,9 @@
 #include "internal/Hacl_Krmllib.h"
 #include "internal/Hacl_Hash_SHA2.h"
 #include "internal/Hacl_Hash_SHA1.h"
-#include "internal/Hacl_Hash_Blake2.h"
+#include "internal/Hacl_Hash_Blake2s.h"
+#include "internal/Hacl_Hash_Blake2b.h"
+#include "internal/Hacl_HMAC.h"
 #include "internal/EverCrypt_Hash.h"
 
 bool EverCrypt_HMAC_is_supported_alg(Spec_Hash_Definitions_hash_alg uu___)
@@ -67,7 +69,7 @@ bool EverCrypt_HMAC_is_supported_alg(Spec_Hash_Definitions_hash_alg uu___)
 }
 
 void
-(*EverCrypt_HMAC_hash_256)(uint8_t *x0, uint32_t x1, uint8_t *x2) =
+(*EverCrypt_HMAC_hash_256)(uint8_t *x0, uint8_t *x1, uint32_t x2) =
   EverCrypt_Hash_Incremental_hash_256;
 
 void
@@ -79,68 +81,63 @@ EverCrypt_HMAC_compute_sha1(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)20U;
+    ite = 20U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Hash_SHA1_legacy_hash(key, key_len, nkey);
+    Hacl_Hash_SHA1_hash_oneshot(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
-  uint32_t
-  s[5U] =
-    {
-      (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-      (uint32_t)0xc3d2e1f0U
-    };
+  uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)0U, ipad, (uint32_t)64U);
+    Hacl_Hash_SHA1_update_last(s, 0ULL, ipad, 64U);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -152,25 +149,21 @@ EverCrypt_HMAC_compute_sha1(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_Hash_SHA1_legacy_update_multi(s, ipad, (uint32_t)1U);
-    Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-    Hacl_Hash_SHA1_legacy_update_last(s,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-      rem,
-      rem_len);
+    Hacl_Hash_SHA1_update_multi(s, ipad, 1U);
+    Hacl_Hash_SHA1_update_multi(s, full_blocks, n_blocks);
+    Hacl_Hash_SHA1_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
   }
-  Hacl_Hash_Core_SHA1_legacy_finish(s, dst1);
+  Hacl_Hash_SHA1_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_Hash_Core_SHA1_legacy_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)20U / block_len;
-  uint32_t rem0 = (uint32_t)20U % block_len;
+  Hacl_Hash_SHA1_init(s);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 20U / block_len;
+  uint32_t rem0 = 20U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)20U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 20U - n_blocks_ * block_len });
   }
   else
   {
@@ -181,13 +174,10 @@ EverCrypt_HMAC_compute_sha1(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_Hash_SHA1_legacy_update_multi(s, opad, (uint32_t)1U);
-  Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-  Hacl_Hash_SHA1_legacy_update_last(s,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-    rem,
-    rem_len);
-  Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
+  Hacl_Hash_SHA1_update_multi(s, opad, 1U);
+  Hacl_Hash_SHA1_update_multi(s, full_blocks, n_blocks);
+  Hacl_Hash_SHA1_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
+  Hacl_Hash_SHA1_finish(s, dst);
 }
 
 void
@@ -199,74 +189,71 @@ EverCrypt_HMAC_compute_sha2_256(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    EverCrypt_HMAC_hash_256(key, key_len, nkey);
+    EverCrypt_HMAC_hash_256(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
-    uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
+    uint32_t x = Hacl_Hash_SHA2_h256[i];
     os[i] = x;);
   uint32_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)0U + (uint64_t)(uint32_t)64U,
-      (uint32_t)64U,
-      ipad,
-      s);
+    Hacl_Hash_SHA2_sha256_update_last(0ULL + (uint64_t)64U, 64U, ipad, s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -278,27 +265,24 @@ EverCrypt_HMAC_compute_sha2_256(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    EverCrypt_Hash_update_multi_256(s, ipad, (uint32_t)1U);
+    EverCrypt_Hash_update_multi_256(s, ipad, 1U);
     EverCrypt_Hash_update_multi_256(s, full_blocks, n_blocks);
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
-      + (uint64_t)full_blocks_len
-      + (uint64_t)rem_len,
+    Hacl_Hash_SHA2_sha256_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len,
       rem_len,
       rem,
       s);
   }
-  Hacl_SHA2_Scalar32_sha256_finish(s, dst1);
+  Hacl_Hash_SHA2_sha256_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_SHA2_Scalar32_sha256_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Hash_SHA2_sha256_init(s);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -309,15 +293,13 @@ EverCrypt_HMAC_compute_sha2_256(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  EverCrypt_Hash_update_multi_256(s, opad, (uint32_t)1U);
+  EverCrypt_Hash_update_multi_256(s, opad, 1U);
   EverCrypt_Hash_update_multi_256(s, full_blocks, n_blocks);
-  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
-    + (uint64_t)full_blocks_len
-    + (uint64_t)rem_len,
+  Hacl_Hash_SHA2_sha256_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len,
     rem_len,
     rem,
     s);
-  Hacl_SHA2_Scalar32_sha256_finish(s, dst);
+  Hacl_Hash_SHA2_sha256_finish(s, dst);
 }
 
 void
@@ -329,75 +311,75 @@ EverCrypt_HMAC_compute_sha2_384(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)48U;
+    ite = 48U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Streaming_SHA2_hash_384(key, key_len, nkey);
+    Hacl_Hash_SHA2_hash_384(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
-    uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
+    uint64_t x = Hacl_Hash_SHA2_h384[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -409,27 +391,26 @@ EverCrypt_HMAC_compute_sha2_384(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_Hash_SHA2_sha384_update_nblocks(128U, ipad, s);
+    Hacl_Hash_SHA2_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
       rem,
       s);
   }
-  Hacl_SHA2_Scalar32_sha384_finish(s, dst1);
+  Hacl_Hash_SHA2_sha384_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_SHA2_Scalar32_sha384_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)48U / block_len;
-  uint32_t rem0 = (uint32_t)48U % block_len;
+  Hacl_Hash_SHA2_sha384_init(s);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 48U / block_len;
+  uint32_t rem0 = 48U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)48U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 48U - n_blocks_ * block_len });
   }
   else
   {
@@ -440,15 +421,15 @@ EverCrypt_HMAC_compute_sha2_384(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_Hash_SHA2_sha384_update_nblocks(128U, opad, s);
+  Hacl_Hash_SHA2_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
     rem,
     s);
-  Hacl_SHA2_Scalar32_sha384_finish(s, dst);
+  Hacl_Hash_SHA2_sha384_finish(s, dst);
 }
 
 void
@@ -460,75 +441,75 @@ EverCrypt_HMAC_compute_sha2_512(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Streaming_SHA2_hash_512(key, key_len, nkey);
+    Hacl_Hash_SHA2_hash_512(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
-    uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
+    uint64_t x = Hacl_Hash_SHA2_h512[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -540,27 +521,26 @@ EverCrypt_HMAC_compute_sha2_512(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_Hash_SHA2_sha512_update_nblocks(128U, ipad, s);
+    Hacl_Hash_SHA2_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
       rem,
       s);
   }
-  Hacl_SHA2_Scalar32_sha512_finish(s, dst1);
+  Hacl_Hash_SHA2_sha512_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_SHA2_Scalar32_sha512_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Hash_SHA2_sha512_init(s);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -571,15 +551,15 @@ EverCrypt_HMAC_compute_sha2_512(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_Hash_SHA2_sha512_update_nblocks(128U, opad, s);
+  Hacl_Hash_SHA2_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
     rem,
     s);
-  Hacl_SHA2_Scalar32_sha512_finish(s, dst);
+  Hacl_Hash_SHA2_sha512_finish(s, dst);
 }
 
 void
@@ -591,66 +571,66 @@ EverCrypt_HMAC_compute_blake2s(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2s_32_blake2s((uint32_t)32U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Hash_Blake2s_hash_with_key(nkey, 32U, key, key_len, NULL, 0U);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t s[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_init(s, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Hash_Blake2s_init(s, 0U, 32U);
   uint32_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last((uint32_t)64U, wv, s0, (uint64_t)0U, (uint32_t)64U, ipad);
+    Hacl_Hash_Blake2s_update_last(64U, wv, s0, 0ULL, 64U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -663,34 +643,33 @@ EverCrypt_HMAC_compute_blake2s(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, ipad, (uint32_t)1U);
+    Hacl_Hash_Blake2s_update_multi(64U, wv, s0, 0ULL, ipad, 1U);
     uint32_t wv0[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+    Hacl_Hash_Blake2s_update_multi(n_blocks * 64U,
       wv0,
       s0,
       (uint64_t)block_len,
       full_blocks,
       n_blocks);
     uint32_t wv1[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last(rem_len,
+    Hacl_Hash_Blake2s_update_last(rem_len,
       wv1,
       s0,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+      (uint64_t)64U + (uint64_t)full_blocks_len,
       rem_len,
       rem);
   }
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst1, s0);
+  Hacl_Hash_Blake2s_finish(32U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2s_32_blake2s_init(s0, (uint32_t)0U, (uint32_t)32U);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Hash_Blake2s_init(s0, 0U, 32U);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -702,22 +681,22 @@ EverCrypt_HMAC_compute_blake2s(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint32_t wv[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, opad, (uint32_t)1U);
+  Hacl_Hash_Blake2s_update_multi(64U, wv, s0, 0ULL, opad, 1U);
   uint32_t wv0[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+  Hacl_Hash_Blake2s_update_multi(n_blocks * 64U,
     wv0,
     s0,
     (uint64_t)block_len,
     full_blocks,
     n_blocks);
   uint32_t wv1[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_last(rem_len,
+  Hacl_Hash_Blake2s_update_last(rem_len,
     wv1,
     s0,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+    (uint64_t)64U + (uint64_t)full_blocks_len,
     rem_len,
     rem);
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, s0);
+  Hacl_Hash_Blake2s_finish(32U, dst, s0);
 }
 
 void
@@ -729,71 +708,66 @@ EverCrypt_HMAC_compute_blake2b(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2b_32_blake2b((uint32_t)64U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Hash_Blake2b_hash_with_key(nkey, 64U, key, key_len, NULL, 0U);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t s[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_init(s, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Hash_Blake2b_init(s, 0U, 64U);
   uint64_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last((uint32_t)128U,
-      wv,
-      s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      (uint32_t)128U,
-      ipad);
+    Hacl_Hash_Blake2b_update_last(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), 128U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -806,40 +780,34 @@ EverCrypt_HMAC_compute_blake2b(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
-      wv,
-      s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      ipad,
-      (uint32_t)1U);
+    Hacl_Hash_Blake2b_update_multi(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), ipad, 1U);
     uint64_t wv0[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+    Hacl_Hash_Blake2b_update_multi(n_blocks * 128U,
       wv0,
       s0,
       FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
       full_blocks,
       n_blocks);
     uint64_t wv1[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last(rem_len,
+    Hacl_Hash_Blake2b_update_last(rem_len,
       wv1,
       s0,
-      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       rem_len,
       rem);
   }
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst1, s0);
+  Hacl_Hash_Blake2b_finish(64U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2b_32_blake2b_init(s0, (uint32_t)0U, (uint32_t)64U);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Hash_Blake2b_init(s0, 0U, 64U);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -851,28 +819,23 @@ EverCrypt_HMAC_compute_blake2b(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint64_t wv[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
-    wv,
-    s0,
-    FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-    opad,
-    (uint32_t)1U);
+  Hacl_Hash_Blake2b_update_multi(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), opad, 1U);
   uint64_t wv0[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+  Hacl_Hash_Blake2b_update_multi(n_blocks * 128U,
     wv0,
     s0,
     FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
     full_blocks,
     n_blocks);
   uint64_t wv1[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_last(rem_len,
+  Hacl_Hash_Blake2b_update_last(rem_len,
     wv1,
     s0,
-    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
       FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
     rem_len,
     rem);
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, s0);
+  Hacl_Hash_Blake2b_finish(64U, dst, s0);
 }
 
 void
diff --git a/src/msvc/EverCrypt_Hash.c b/src/msvc/EverCrypt_Hash.c
index b88df9e2..1adf2f1d 100644
--- a/src/msvc/EverCrypt_Hash.c
+++ b/src/msvc/EverCrypt_Hash.c
@@ -31,6 +31,10 @@
 #include "internal/Hacl_Hash_SHA2.h"
 #include "internal/Hacl_Hash_SHA1.h"
 #include "internal/Hacl_Hash_MD5.h"
+#include "internal/Hacl_Hash_Blake2s_Simd128.h"
+#include "internal/Hacl_Hash_Blake2s.h"
+#include "internal/Hacl_Hash_Blake2b_Simd256.h"
+#include "internal/Hacl_Hash_Blake2b.h"
 #include "config.h"
 
 #define MD5_s 0
@@ -146,61 +150,61 @@ static EverCrypt_Hash_state_s *create_in(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = MD5_s, { .case_MD5_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA1_s, { .case_SHA1_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_224_s, { .case_SHA2_224_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_256_s, { .case_SHA2_256_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_384_s, { .case_SHA2_384_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA2_512_s, { .case_SHA2_512_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_224_s, { .case_SHA3_224_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_256_s, { .case_SHA3_256_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_384_s, { .case_SHA3_384_s = buf } });
         break;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = SHA3_512_s, { .case_SHA3_512_s = buf } });
         break;
       }
@@ -214,17 +218,17 @@ static EverCrypt_Hash_state_s *create_in(Spec_Hash_Definitions_hash_alg a)
             (
               (EverCrypt_Hash_state_s){
                 .tag = Blake2S_128_s,
-                { .case_Blake2S_128_s = Hacl_Blake2s_128_blake2s_malloc() }
+                { .case_Blake2S_128_s = Hacl_Hash_Blake2s_Simd128_malloc_with_key() }
               }
             );
         }
         else
         {
-          uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
+          uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
           s = ((EverCrypt_Hash_state_s){ .tag = Blake2S_s, { .case_Blake2S_s = buf } });
         }
         #else
-        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
+        uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
         s = ((EverCrypt_Hash_state_s){ .tag = Blake2S_s, { .case_Blake2S_s = buf } });
         #endif
         break;
@@ -239,17 +243,17 @@ static EverCrypt_Hash_state_s *create_in(Spec_Hash_Definitions_hash_alg a)
             (
               (EverCrypt_Hash_state_s){
                 .tag = Blake2B_256_s,
-                { .case_Blake2B_256_s = Hacl_Blake2b_256_blake2b_malloc() }
+                { .case_Blake2B_256_s = Hacl_Hash_Blake2b_Simd256_malloc_with_key() }
               }
             );
         }
         else
         {
-          uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
+          uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
           s = ((EverCrypt_Hash_state_s){ .tag = Blake2B_s, { .case_Blake2B_s = buf } });
         }
         #else
-        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
+        uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
         s = ((EverCrypt_Hash_state_s){ .tag = Blake2B_s, { .case_Blake2B_s = buf } });
         #endif
         break;
@@ -272,94 +276,94 @@ static void init(EverCrypt_Hash_state_s *s)
   if (scrut.tag == MD5_s)
   {
     uint32_t *p1 = scrut.case_MD5_s;
-    Hacl_Hash_Core_MD5_legacy_init(p1);
+    Hacl_Hash_MD5_init(p1);
     return;
   }
   if (scrut.tag == SHA1_s)
   {
     uint32_t *p1 = scrut.case_SHA1_s;
-    Hacl_Hash_Core_SHA1_legacy_init(p1);
+    Hacl_Hash_SHA1_init(p1);
     return;
   }
   if (scrut.tag == SHA2_224_s)
   {
     uint32_t *p1 = scrut.case_SHA2_224_s;
-    Hacl_SHA2_Scalar32_sha224_init(p1);
+    Hacl_Hash_SHA2_sha224_init(p1);
     return;
   }
   if (scrut.tag == SHA2_256_s)
   {
     uint32_t *p1 = scrut.case_SHA2_256_s;
-    Hacl_SHA2_Scalar32_sha256_init(p1);
+    Hacl_Hash_SHA2_sha256_init(p1);
     return;
   }
   if (scrut.tag == SHA2_384_s)
   {
     uint64_t *p1 = scrut.case_SHA2_384_s;
-    Hacl_SHA2_Scalar32_sha384_init(p1);
+    Hacl_Hash_SHA2_sha384_init(p1);
     return;
   }
   if (scrut.tag == SHA2_512_s)
   {
     uint64_t *p1 = scrut.case_SHA2_512_s;
-    Hacl_SHA2_Scalar32_sha512_init(p1);
+    Hacl_Hash_SHA2_sha512_init(p1);
     return;
   }
   if (scrut.tag == SHA3_224_s)
   {
     uint64_t *p1 = scrut.case_SHA3_224_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == SHA3_256_s)
   {
     uint64_t *p1 = scrut.case_SHA3_256_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == SHA3_384_s)
   {
     uint64_t *p1 = scrut.case_SHA3_384_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == SHA3_512_s)
   {
     uint64_t *p1 = scrut.case_SHA3_512_s;
-    memset(p1, 0U, (uint32_t)25U * sizeof (uint64_t));
+    memset(p1, 0U, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut.tag == Blake2S_s)
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
-    Hacl_Blake2s_32_blake2s_init(p1, (uint32_t)0U, (uint32_t)32U);
+    Hacl_Hash_Blake2s_init(p1, 0U, 32U);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
   {
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
-    Hacl_Blake2s_128_blake2s_init(p1, (uint32_t)0U, (uint32_t)32U);
+    Hacl_Hash_Blake2s_Simd128_init(p1, 0U, 32U);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
   if (scrut.tag == Blake2B_s)
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
-    Hacl_Blake2b_32_blake2b_init(p1, (uint32_t)0U, (uint32_t)64U);
+    Hacl_Hash_Blake2b_init(p1, 0U, 64U);
     return;
   }
   if (scrut.tag == Blake2B_256_s)
   {
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
-    Hacl_Blake2b_256_blake2b_init(p1, (uint32_t)0U, (uint32_t)64U);
+    Hacl_Hash_Blake2b_Simd256_init(p1, 0U, 64U);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -373,22 +377,16 @@ static void init(EverCrypt_Hash_state_s *s)
 static uint32_t
 k224_256[64U] =
   {
-    (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U,
-    (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U,
-    (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U,
-    (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U,
-    (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU,
-    (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU,
-    (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U,
-    (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U,
-    (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U,
-    (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U,
-    (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U,
-    (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U,
-    (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U,
-    (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U,
-    (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U,
-    (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U
+    0x428a2f98U, 0x71374491U, 0xb5c0fbcfU, 0xe9b5dba5U, 0x3956c25bU, 0x59f111f1U, 0x923f82a4U,
+    0xab1c5ed5U, 0xd807aa98U, 0x12835b01U, 0x243185beU, 0x550c7dc3U, 0x72be5d74U, 0x80deb1feU,
+    0x9bdc06a7U, 0xc19bf174U, 0xe49b69c1U, 0xefbe4786U, 0x0fc19dc6U, 0x240ca1ccU, 0x2de92c6fU,
+    0x4a7484aaU, 0x5cb0a9dcU, 0x76f988daU, 0x983e5152U, 0xa831c66dU, 0xb00327c8U, 0xbf597fc7U,
+    0xc6e00bf3U, 0xd5a79147U, 0x06ca6351U, 0x14292967U, 0x27b70a85U, 0x2e1b2138U, 0x4d2c6dfcU,
+    0x53380d13U, 0x650a7354U, 0x766a0abbU, 0x81c2c92eU, 0x92722c85U, 0xa2bfe8a1U, 0xa81a664bU,
+    0xc24b8b70U, 0xc76c51a3U, 0xd192e819U, 0xd6990624U, 0xf40e3585U, 0x106aa070U, 0x19a4c116U,
+    0x1e376c08U, 0x2748774cU, 0x34b0bcb5U, 0x391c0cb3U, 0x4ed8aa4aU, 0x5b9cca4fU, 0x682e6ff3U,
+    0x748f82eeU, 0x78a5636fU, 0x84c87814U, 0x8cc70208U, 0x90befffaU, 0xa4506cebU, 0xbef9a3f7U,
+    0xc67178f2U
   };
 
 void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n)
@@ -399,13 +397,13 @@ void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n)
   if (has_shaext && has_sse)
   {
     uint64_t n1 = (uint64_t)n;
-    KRML_HOST_IGNORE(sha256_update(s, blocks, n1, k224_256));
+    sha256_update(s, blocks, n1, k224_256);
     return;
   }
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(n * (uint32_t)64U, blocks, s);
+  Hacl_Hash_SHA2_sha256_update_nblocks(n * 64U, blocks, s);
   #else
   KRML_HOST_IGNORE(k224_256);
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(n * (uint32_t)64U, blocks, s);
+  Hacl_Hash_SHA2_sha256_update_nblocks(n * 64U, blocks, s);
   #endif
 }
 
@@ -416,100 +414,100 @@ update_multi(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *blocks, uint3
   if (scrut.tag == MD5_s)
   {
     uint32_t *p1 = scrut.case_MD5_s;
-    uint32_t n = len / (uint32_t)64U;
-    Hacl_Hash_MD5_legacy_update_multi(p1, blocks, n);
+    uint32_t n = len / 64U;
+    Hacl_Hash_MD5_update_multi(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA1_s)
   {
     uint32_t *p1 = scrut.case_SHA1_s;
-    uint32_t n = len / (uint32_t)64U;
-    Hacl_Hash_SHA1_legacy_update_multi(p1, blocks, n);
+    uint32_t n = len / 64U;
+    Hacl_Hash_SHA1_update_multi(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA2_224_s)
   {
     uint32_t *p1 = scrut.case_SHA2_224_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     EverCrypt_Hash_update_multi_256(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA2_256_s)
   {
     uint32_t *p1 = scrut.case_SHA2_256_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     EverCrypt_Hash_update_multi_256(p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA2_384_s)
   {
     uint64_t *p1 = scrut.case_SHA2_384_s;
-    uint32_t n = len / (uint32_t)128U;
-    Hacl_SHA2_Scalar32_sha384_update_nblocks(n * (uint32_t)128U, blocks, p1);
+    uint32_t n = len / 128U;
+    Hacl_Hash_SHA2_sha384_update_nblocks(n * 128U, blocks, p1);
     return;
   }
   if (scrut.tag == SHA2_512_s)
   {
     uint64_t *p1 = scrut.case_SHA2_512_s;
-    uint32_t n = len / (uint32_t)128U;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(n * (uint32_t)128U, blocks, p1);
+    uint32_t n = len / 128U;
+    Hacl_Hash_SHA2_sha512_update_nblocks(n * 128U, blocks, p1);
     return;
   }
   if (scrut.tag == SHA3_224_s)
   {
     uint64_t *p1 = scrut.case_SHA3_224_s;
-    uint32_t n = len / (uint32_t)144U;
+    uint32_t n = len / 144U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_224, p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA3_256_s)
   {
     uint64_t *p1 = scrut.case_SHA3_256_s;
-    uint32_t n = len / (uint32_t)136U;
+    uint32_t n = len / 136U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_256, p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA3_384_s)
   {
     uint64_t *p1 = scrut.case_SHA3_384_s;
-    uint32_t n = len / (uint32_t)104U;
+    uint32_t n = len / 104U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_384, p1, blocks, n);
     return;
   }
   if (scrut.tag == SHA3_512_s)
   {
     uint64_t *p1 = scrut.case_SHA3_512_s;
-    uint32_t n = len / (uint32_t)72U;
+    uint32_t n = len / 72U;
     Hacl_Hash_SHA3_update_multi_sha3(Spec_Hash_Definitions_SHA3_512, p1, blocks, n);
     return;
   }
   if (scrut.tag == Blake2S_s)
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi(n * (uint32_t)64U, wv, p1, prevlen, blocks, n);
+    Hacl_Hash_Blake2s_update_multi(n * 64U, wv, p1, prevlen, blocks, n);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
   {
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
-    uint32_t n = len / (uint32_t)64U;
+    uint32_t n = len / 64U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_multi(n * (uint32_t)64U, wv, p1, prevlen, blocks, n);
+    Hacl_Hash_Blake2s_Simd128_update_multi(n * 64U, wv, p1, prevlen, blocks, n);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
   if (scrut.tag == Blake2B_s)
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
-    uint32_t n = len / (uint32_t)128U;
+    uint32_t n = len / 128U;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi(n * (uint32_t)128U,
+    Hacl_Hash_Blake2b_update_multi(n * 128U,
       wv,
       p1,
       FStar_UInt128_uint64_to_uint128(prevlen),
@@ -521,9 +519,9 @@ update_multi(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *blocks, uint3
   {
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
-    uint32_t n = len / (uint32_t)128U;
+    uint32_t n = len / 128U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_multi(n * (uint32_t)128U,
+    Hacl_Hash_Blake2b_Simd256_update_multi(n * 128U,
       wv,
       p1,
       FStar_UInt128_uint64_to_uint128(prevlen),
@@ -531,7 +529,7 @@ update_multi(EverCrypt_Hash_state_s *s, uint64_t prevlen, uint8_t *blocks, uint3
       n);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -549,31 +547,31 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
   if (scrut.tag == MD5_s)
   {
     uint32_t *p1 = scrut.case_MD5_s;
-    Hacl_Hash_MD5_legacy_update_last(p1, prev_len, last, last_len);
+    Hacl_Hash_MD5_update_last(p1, prev_len, last, last_len);
     return;
   }
   if (scrut.tag == SHA1_s)
   {
     uint32_t *p1 = scrut.case_SHA1_s;
-    Hacl_Hash_SHA1_legacy_update_last(p1, prev_len, last, last_len);
+    Hacl_Hash_SHA1_update_last(p1, prev_len, last, last_len);
     return;
   }
   if (scrut.tag == SHA2_224_s)
   {
     uint32_t *p1 = scrut.case_SHA2_224_s;
-    Hacl_SHA2_Scalar32_sha224_update_last(prev_len + (uint64_t)last_len, last_len, last, p1);
+    Hacl_Hash_SHA2_sha224_update_last(prev_len + (uint64_t)last_len, last_len, last, p1);
     return;
   }
   if (scrut.tag == SHA2_256_s)
   {
     uint32_t *p1 = scrut.case_SHA2_256_s;
-    Hacl_SHA2_Scalar32_sha256_update_last(prev_len + (uint64_t)last_len, last_len, last, p1);
+    Hacl_Hash_SHA2_sha256_update_last(prev_len + (uint64_t)last_len, last_len, last, p1);
     return;
   }
   if (scrut.tag == SHA2_384_s)
   {
     uint64_t *p1 = scrut.case_SHA2_384_s;
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len),
+    Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len),
         FStar_UInt128_uint64_to_uint128((uint64_t)last_len)),
       last_len,
       last,
@@ -583,7 +581,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
   if (scrut.tag == SHA2_512_s)
   {
     uint64_t *p1 = scrut.case_SHA2_512_s;
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len),
+    Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len),
         FStar_UInt128_uint64_to_uint128((uint64_t)last_len)),
       last_len,
       last,
@@ -618,7 +616,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last(last_len, wv, p1, prev_len, last_len, last);
+    Hacl_Hash_Blake2s_update_last(last_len, wv, p1, prev_len, last_len, last);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
@@ -626,10 +624,10 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_last(last_len, wv, p1, prev_len, last_len, last);
+    Hacl_Hash_Blake2s_Simd128_update_last(last_len, wv, p1, prev_len, last_len, last);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -637,7 +635,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last(last_len,
+    Hacl_Hash_Blake2b_update_last(last_len,
       wv,
       p1,
       FStar_UInt128_uint64_to_uint128(prev_len),
@@ -650,7 +648,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_last(last_len,
+    Hacl_Hash_Blake2b_Simd256_update_last(last_len,
       wv,
       p1,
       FStar_UInt128_uint64_to_uint128(prev_len),
@@ -658,7 +656,7 @@ update_last(EverCrypt_Hash_state_s *s, uint64_t prev_len, uint8_t *last, uint32_
       last);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -675,94 +673,94 @@ static void finish(EverCrypt_Hash_state_s *s, uint8_t *dst)
   if (scrut.tag == MD5_s)
   {
     uint32_t *p1 = scrut.case_MD5_s;
-    Hacl_Hash_Core_MD5_legacy_finish(p1, dst);
+    Hacl_Hash_MD5_finish(p1, dst);
     return;
   }
   if (scrut.tag == SHA1_s)
   {
     uint32_t *p1 = scrut.case_SHA1_s;
-    Hacl_Hash_Core_SHA1_legacy_finish(p1, dst);
+    Hacl_Hash_SHA1_finish(p1, dst);
     return;
   }
   if (scrut.tag == SHA2_224_s)
   {
     uint32_t *p1 = scrut.case_SHA2_224_s;
-    Hacl_SHA2_Scalar32_sha224_finish(p1, dst);
+    Hacl_Hash_SHA2_sha224_finish(p1, dst);
     return;
   }
   if (scrut.tag == SHA2_256_s)
   {
     uint32_t *p1 = scrut.case_SHA2_256_s;
-    Hacl_SHA2_Scalar32_sha256_finish(p1, dst);
+    Hacl_Hash_SHA2_sha256_finish(p1, dst);
     return;
   }
   if (scrut.tag == SHA2_384_s)
   {
     uint64_t *p1 = scrut.case_SHA2_384_s;
-    Hacl_SHA2_Scalar32_sha384_finish(p1, dst);
+    Hacl_Hash_SHA2_sha384_finish(p1, dst);
     return;
   }
   if (scrut.tag == SHA2_512_s)
   {
     uint64_t *p1 = scrut.case_SHA2_512_s;
-    Hacl_SHA2_Scalar32_sha512_finish(p1, dst);
+    Hacl_Hash_SHA2_sha512_finish(p1, dst);
     return;
   }
   if (scrut.tag == SHA3_224_s)
   {
     uint64_t *p1 = scrut.case_SHA3_224_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)144U, (uint32_t)28U, dst);
+    Hacl_Hash_SHA3_squeeze0(p1, 144U, 28U, dst);
     return;
   }
   if (scrut.tag == SHA3_256_s)
   {
     uint64_t *p1 = scrut.case_SHA3_256_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)136U, (uint32_t)32U, dst);
+    Hacl_Hash_SHA3_squeeze0(p1, 136U, 32U, dst);
     return;
   }
   if (scrut.tag == SHA3_384_s)
   {
     uint64_t *p1 = scrut.case_SHA3_384_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)104U, (uint32_t)48U, dst);
+    Hacl_Hash_SHA3_squeeze0(p1, 104U, 48U, dst);
     return;
   }
   if (scrut.tag == SHA3_512_s)
   {
     uint64_t *p1 = scrut.case_SHA3_512_s;
-    Hacl_Impl_SHA3_squeeze(p1, (uint32_t)72U, (uint32_t)64U, dst);
+    Hacl_Hash_SHA3_squeeze0(p1, 72U, 64U, dst);
     return;
   }
   if (scrut.tag == Blake2S_s)
   {
     uint32_t *p1 = scrut.case_Blake2S_s;
-    Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, p1);
+    Hacl_Hash_Blake2s_finish(32U, dst, p1);
     return;
   }
   if (scrut.tag == Blake2S_128_s)
   {
     Lib_IntVector_Intrinsics_vec128 *p1 = scrut.case_Blake2S_128_s;
     #if HACL_CAN_COMPILE_VEC128
-    Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst, p1);
+    Hacl_Hash_Blake2s_Simd128_finish(32U, dst, p1);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
   if (scrut.tag == Blake2B_s)
   {
     uint64_t *p1 = scrut.case_Blake2B_s;
-    Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, p1);
+    Hacl_Hash_Blake2b_finish(64U, dst, p1);
     return;
   }
   if (scrut.tag == Blake2B_256_s)
   {
     Lib_IntVector_Intrinsics_vec256 *p1 = scrut.case_Blake2B_256_s;
     #if HACL_CAN_COMPILE_VEC256
-    Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst, p1);
+    Hacl_Hash_Blake2b_Simd256_finish(64U, dst, p1);
     return;
     #else
-    KRML_HOST_IGNORE(p1);
+    KRML_MAYBE_UNUSED_VAR(p1);
     return;
     #endif
   }
@@ -873,7 +871,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)4U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 4U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA1_s)
@@ -889,7 +887,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)5U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 5U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA2_224_s)
@@ -905,7 +903,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA2_256_s)
@@ -921,7 +919,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint32_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint32_t));
     return;
   }
   if (scrut0.tag == SHA2_384_s)
@@ -937,7 +935,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA2_512_s)
@@ -953,7 +951,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)8U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 8U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_224_s)
@@ -969,7 +967,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_256_s)
@@ -985,7 +983,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_384_s)
@@ -1001,7 +999,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == SHA3_512_s)
@@ -1017,7 +1015,7 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     {
       p_dst = KRML_EABORT(uint64_t *, "unreachable (pattern matches are exhaustive in F*)");
     }
-    memcpy(p_dst, p_src, (uint32_t)25U * sizeof (uint64_t));
+    memcpy(p_dst, p_src, 25U * sizeof (uint64_t));
     return;
   }
   if (scrut0.tag == Blake2S_s)
@@ -1027,17 +1025,17 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2S_s)
     {
       uint32_t *p_dst = scrut.case_Blake2S_s;
-      memcpy(p_dst, p_src, (uint32_t)16U * sizeof (uint32_t));
+      memcpy(p_dst, p_src, 16U * sizeof (uint32_t));
       return;
     }
     if (scrut.tag == Blake2S_128_s)
     {
       Lib_IntVector_Intrinsics_vec128 *p_dst = scrut.case_Blake2S_128_s;
       #if HACL_CAN_COMPILE_VEC128
-      Hacl_Blake2s_128_load_state128s_from_state32(p_dst, p_src);
+      Hacl_Hash_Blake2s_Simd128_load_state128s_from_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1054,17 +1052,17 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2B_s)
     {
       uint64_t *p_dst = scrut.case_Blake2B_s;
-      memcpy(p_dst, p_src, (uint32_t)16U * sizeof (uint64_t));
+      memcpy(p_dst, p_src, 16U * sizeof (uint64_t));
       return;
     }
     if (scrut.tag == Blake2B_256_s)
     {
       Lib_IntVector_Intrinsics_vec256 *p_dst = scrut.case_Blake2B_256_s;
       #if HACL_CAN_COMPILE_VEC256
-      Hacl_Blake2b_256_load_state256b_from_state32(p_dst, p_src);
+      Hacl_Hash_Blake2b_Simd256_load_state256b_from_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1081,17 +1079,17 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2S_128_s)
     {
       Lib_IntVector_Intrinsics_vec128 *p_dst = scrut.case_Blake2S_128_s;
-      memcpy(p_dst, p_src, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+      memcpy(p_dst, p_src, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
       return;
     }
     if (scrut.tag == Blake2S_s)
     {
       uint32_t *p_dst = scrut.case_Blake2S_s;
       #if HACL_CAN_COMPILE_VEC128
-      Hacl_Blake2s_128_store_state128s_to_state32(p_dst, p_src);
+      Hacl_Hash_Blake2s_Simd128_store_state128s_to_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1108,17 +1106,17 @@ static void copy(EverCrypt_Hash_state_s *s_src, EverCrypt_Hash_state_s *s_dst)
     if (scrut.tag == Blake2B_256_s)
     {
       Lib_IntVector_Intrinsics_vec256 *p_dst = scrut.case_Blake2B_256_s;
-      memcpy(p_dst, p_src, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+      memcpy(p_dst, p_src, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
       return;
     }
     if (scrut.tag == Blake2B_s)
     {
       uint64_t *p_dst = scrut.case_Blake2B_s;
       #if HACL_CAN_COMPILE_VEC256
-      Hacl_Blake2b_256_store_state256b_to_state32(p_dst, p_src);
+      Hacl_Hash_Blake2b_Simd256_store_state256b_to_state32(p_dst, p_src);
       return;
       #else
-      KRML_HOST_IGNORE(p_dst);
+      KRML_MAYBE_UNUSED_VAR(p_dst);
       return;
       #endif
     }
@@ -1201,59 +1199,59 @@ static uint32_t block_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)144U;
+        return 144U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)104U;
+        return 104U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)72U;
+        return 72U;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        return (uint32_t)168U;
+        return 168U;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     default:
       {
@@ -1269,18 +1267,18 @@ choice of algorithm (see Hacl_Spec.h). This API will automatically pick the most
 efficient implementation, provided you have called EverCrypt_AutoConfig2_init()
 before. The state is to be freed by calling `free`.
 */
-EverCrypt_Hash_Incremental_hash_state
-*EverCrypt_Hash_Incremental_create_in(Spec_Hash_Definitions_hash_alg a)
+EverCrypt_Hash_Incremental_state_t
+*EverCrypt_Hash_Incremental_malloc(Spec_Hash_Definitions_hash_alg a)
 {
   KRML_CHECK_SIZE(sizeof (uint8_t), block_len(a));
   uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(block_len(a), sizeof (uint8_t));
   EverCrypt_Hash_state_s *block_state = create_in(a);
-  EverCrypt_Hash_Incremental_hash_state
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  EverCrypt_Hash_Incremental_hash_state
+  EverCrypt_Hash_Incremental_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  EverCrypt_Hash_Incremental_state_t
   *p =
-    (EverCrypt_Hash_Incremental_hash_state *)KRML_HOST_MALLOC(sizeof (
-        EverCrypt_Hash_Incremental_hash_state
+    (EverCrypt_Hash_Incremental_state_t *)KRML_HOST_MALLOC(sizeof (
+        EverCrypt_Hash_Incremental_state_t
       ));
   p[0U] = s;
   init(block_state);
@@ -1290,17 +1288,17 @@ EverCrypt_Hash_Incremental_hash_state
 /**
 Reset an existing state to the initial hash state with empty data.
 */
-void EverCrypt_Hash_Incremental_init(EverCrypt_Hash_Incremental_hash_state *s)
+void EverCrypt_Hash_Incremental_reset(EverCrypt_Hash_Incremental_state_t *state)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *s;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   uint8_t *buf = scrut.buf;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   Spec_Hash_Definitions_hash_alg i = alg_of_state(block_state);
-  KRML_HOST_IGNORE(i);
+  KRML_MAYBE_UNUSED_VAR(i);
   init(block_state);
-  EverCrypt_Hash_Incremental_hash_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  EverCrypt_Hash_Incremental_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 /**
@@ -1312,86 +1310,86 @@ algorithm. Both limits are unlikely to be attained in practice.
 */
 EverCrypt_Error_error_code
 EverCrypt_Hash_Incremental_update(
-  EverCrypt_Hash_Incremental_hash_state *s,
-  uint8_t *data,
-  uint32_t len
+  EverCrypt_Hash_Incremental_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
 )
 {
-  EverCrypt_Hash_Incremental_hash_state s1 = *s;
-  EverCrypt_Hash_state_s *block_state = s1.block_state;
-  uint64_t total_len = s1.total_len;
+  EverCrypt_Hash_Incremental_state_t s = *state;
+  EverCrypt_Hash_state_s *block_state = s.block_state;
+  uint64_t total_len = s.total_len;
   Spec_Hash_Definitions_hash_alg i1 = alg_of_state(block_state);
   uint64_t sw;
   switch (i1)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        sw = (uint64_t)2305843009213693951U;
+        sw = 2305843009213693951ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        sw = (uint64_t)18446744073709551615U;
+        sw = 18446744073709551615ULL;
         break;
       }
     default:
@@ -1401,14 +1399,14 @@ EverCrypt_Hash_Incremental_update(
       }
   }
   Hacl_Streaming_Types_error_code ite;
-  if ((uint64_t)len > sw - total_len)
+  if ((uint64_t)chunk_len > sw - total_len)
   {
     ite = Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   else
   {
     uint32_t sz;
-    if (total_len % (uint64_t)block_len(i1) == (uint64_t)0U && total_len > (uint64_t)0U)
+    if (total_len % (uint64_t)block_len(i1) == 0ULL && total_len > 0ULL)
     {
       sz = block_len(i1);
     }
@@ -1416,14 +1414,14 @@ EverCrypt_Hash_Incremental_update(
     {
       sz = (uint32_t)(total_len % (uint64_t)block_len(i1));
     }
-    if (len <= block_len(i1) - sz)
+    if (chunk_len <= block_len(i1) - sz)
     {
-      EverCrypt_Hash_Incremental_hash_state s2 = *s;
-      EverCrypt_Hash_state_s *block_state1 = s2.block_state;
-      uint8_t *buf = s2.buf;
-      uint64_t total_len1 = s2.total_len;
+      EverCrypt_Hash_Incremental_state_t s1 = *state;
+      EverCrypt_Hash_state_s *block_state1 = s1.block_state;
+      uint8_t *buf = s1.buf;
+      uint64_t total_len1 = s1.total_len;
       uint32_t sz1;
-      if (total_len1 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+      if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL)
       {
         sz1 = block_len(i1);
       }
@@ -1432,26 +1430,26 @@ EverCrypt_Hash_Incremental_update(
         sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i1));
       }
       uint8_t *buf2 = buf + sz1;
-      memcpy(buf2, data, len * sizeof (uint8_t));
-      uint64_t total_len2 = total_len1 + (uint64_t)len;
-      *s
+      memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+      uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+      *state
       =
         (
-          (EverCrypt_Hash_Incremental_hash_state){
+          (EverCrypt_Hash_Incremental_state_t){
             .block_state = block_state1,
             .buf = buf,
             .total_len = total_len2
           }
         );
     }
-    else if (sz == (uint32_t)0U)
+    else if (sz == 0U)
     {
-      EverCrypt_Hash_Incremental_hash_state s2 = *s;
-      EverCrypt_Hash_state_s *block_state1 = s2.block_state;
-      uint8_t *buf = s2.buf;
-      uint64_t total_len1 = s2.total_len;
+      EverCrypt_Hash_Incremental_state_t s1 = *state;
+      EverCrypt_Hash_state_s *block_state1 = s1.block_state;
+      uint8_t *buf = s1.buf;
+      uint64_t total_len1 = s1.total_len;
       uint32_t sz1;
-      if (total_len1 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+      if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL)
       {
         sz1 = block_len(i1);
       }
@@ -1459,49 +1457,49 @@ EverCrypt_Hash_Incremental_update(
       {
         sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i1));
       }
-      if (!(sz1 == (uint32_t)0U))
+      if (!(sz1 == 0U))
       {
         uint64_t prevlen = total_len1 - (uint64_t)sz1;
         update_multi(block_state1, prevlen, buf, block_len(i1));
       }
       uint32_t ite0;
-      if ((uint64_t)len % (uint64_t)block_len(i1) == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+      if ((uint64_t)chunk_len % (uint64_t)block_len(i1) == 0ULL && (uint64_t)chunk_len > 0ULL)
       {
         ite0 = block_len(i1);
       }
       else
       {
-        ite0 = (uint32_t)((uint64_t)len % (uint64_t)block_len(i1));
+        ite0 = (uint32_t)((uint64_t)chunk_len % (uint64_t)block_len(i1));
       }
-      uint32_t n_blocks = (len - ite0) / block_len(i1);
+      uint32_t n_blocks = (chunk_len - ite0) / block_len(i1);
       uint32_t data1_len = n_blocks * block_len(i1);
-      uint32_t data2_len = len - data1_len;
-      uint8_t *data1 = data;
-      uint8_t *data2 = data + data1_len;
+      uint32_t data2_len = chunk_len - data1_len;
+      uint8_t *data1 = chunk;
+      uint8_t *data2 = chunk + data1_len;
       update_multi(block_state1, total_len1, data1, data1_len);
       uint8_t *dst = buf;
       memcpy(dst, data2, data2_len * sizeof (uint8_t));
-      *s
+      *state
       =
         (
-          (EverCrypt_Hash_Incremental_hash_state){
+          (EverCrypt_Hash_Incremental_state_t){
             .block_state = block_state1,
             .buf = buf,
-            .total_len = total_len1 + (uint64_t)len
+            .total_len = total_len1 + (uint64_t)chunk_len
           }
         );
     }
     else
     {
       uint32_t diff = block_len(i1) - sz;
-      uint8_t *data1 = data;
-      uint8_t *data2 = data + diff;
-      EverCrypt_Hash_Incremental_hash_state s2 = *s;
-      EverCrypt_Hash_state_s *block_state10 = s2.block_state;
-      uint8_t *buf0 = s2.buf;
-      uint64_t total_len10 = s2.total_len;
+      uint8_t *chunk1 = chunk;
+      uint8_t *chunk2 = chunk + diff;
+      EverCrypt_Hash_Incremental_state_t s1 = *state;
+      EverCrypt_Hash_state_s *block_state10 = s1.block_state;
+      uint8_t *buf0 = s1.buf;
+      uint64_t total_len10 = s1.total_len;
       uint32_t sz10;
-      if (total_len10 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len10 > (uint64_t)0U)
+      if (total_len10 % (uint64_t)block_len(i1) == 0ULL && total_len10 > 0ULL)
       {
         sz10 = block_len(i1);
       }
@@ -1510,23 +1508,23 @@ EverCrypt_Hash_Incremental_update(
         sz10 = (uint32_t)(total_len10 % (uint64_t)block_len(i1));
       }
       uint8_t *buf2 = buf0 + sz10;
-      memcpy(buf2, data1, diff * sizeof (uint8_t));
+      memcpy(buf2, chunk1, diff * sizeof (uint8_t));
       uint64_t total_len2 = total_len10 + (uint64_t)diff;
-      *s
+      *state
       =
         (
-          (EverCrypt_Hash_Incremental_hash_state){
+          (EverCrypt_Hash_Incremental_state_t){
             .block_state = block_state10,
             .buf = buf0,
             .total_len = total_len2
           }
         );
-      EverCrypt_Hash_Incremental_hash_state s20 = *s;
-      EverCrypt_Hash_state_s *block_state1 = s20.block_state;
-      uint8_t *buf = s20.buf;
-      uint64_t total_len1 = s20.total_len;
+      EverCrypt_Hash_Incremental_state_t s10 = *state;
+      EverCrypt_Hash_state_s *block_state1 = s10.block_state;
+      uint8_t *buf = s10.buf;
+      uint64_t total_len1 = s10.total_len;
       uint32_t sz1;
-      if (total_len1 % (uint64_t)block_len(i1) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+      if (total_len1 % (uint64_t)block_len(i1) == 0ULL && total_len1 > 0ULL)
       {
         sz1 = block_len(i1);
       }
@@ -1534,7 +1532,7 @@ EverCrypt_Hash_Incremental_update(
       {
         sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i1));
       }
-      if (!(sz1 == (uint32_t)0U))
+      if (!(sz1 == 0U))
       {
         uint64_t prevlen = total_len1 - (uint64_t)sz1;
         update_multi(block_state1, prevlen, buf, block_len(i1));
@@ -1542,33 +1540,33 @@ EverCrypt_Hash_Incremental_update(
       uint32_t ite0;
       if
       (
-        (uint64_t)(len - diff)
+        (uint64_t)(chunk_len - diff)
         % (uint64_t)block_len(i1)
-        == (uint64_t)0U
-        && (uint64_t)(len - diff) > (uint64_t)0U
+        == 0ULL
+        && (uint64_t)(chunk_len - diff) > 0ULL
       )
       {
         ite0 = block_len(i1);
       }
       else
       {
-        ite0 = (uint32_t)((uint64_t)(len - diff) % (uint64_t)block_len(i1));
+        ite0 = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)block_len(i1));
       }
-      uint32_t n_blocks = (len - diff - ite0) / block_len(i1);
+      uint32_t n_blocks = (chunk_len - diff - ite0) / block_len(i1);
       uint32_t data1_len = n_blocks * block_len(i1);
-      uint32_t data2_len = len - diff - data1_len;
-      uint8_t *data11 = data2;
-      uint8_t *data21 = data2 + data1_len;
-      update_multi(block_state1, total_len1, data11, data1_len);
+      uint32_t data2_len = chunk_len - diff - data1_len;
+      uint8_t *data1 = chunk2;
+      uint8_t *data2 = chunk2 + data1_len;
+      update_multi(block_state1, total_len1, data1, data1_len);
       uint8_t *dst = buf;
-      memcpy(dst, data21, data2_len * sizeof (uint8_t));
-      *s
+      memcpy(dst, data2, data2_len * sizeof (uint8_t));
+      *state
       =
         (
-          (EverCrypt_Hash_Incremental_hash_state){
+          (EverCrypt_Hash_Incremental_state_t){
             .block_state = block_state1,
             .buf = buf,
-            .total_len = total_len1 + (uint64_t)(len - diff)
+            .total_len = total_len1 + (uint64_t)(chunk_len - diff)
           }
         );
     }
@@ -1592,20 +1590,14 @@ EverCrypt_Hash_Incremental_update(
   }
 }
 
-static void finish_md5(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_md5(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_MD5)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_MD5) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_MD5);
   }
@@ -1620,7 +1612,7 @@ static void finish_md5(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_MD5) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_MD5) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_MD5);
   }
@@ -1630,26 +1622,20 @@ static void finish_md5(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha1(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha1(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA1)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA1) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA1);
   }
@@ -1664,7 +1650,7 @@ static void finish_sha1(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA1) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA1) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA1);
   }
@@ -1674,26 +1660,21 @@ static void finish_sha1(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha224(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_224)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_224) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_224);
   }
@@ -1708,7 +1689,7 @@ static void finish_sha224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_224) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_224) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_224);
   }
@@ -1718,26 +1699,21 @@ static void finish_sha224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha256(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_256)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_256) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_256);
   }
@@ -1752,7 +1728,7 @@ static void finish_sha256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_256) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_256) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_256);
   }
@@ -1762,26 +1738,21 @@ static void finish_sha256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha3_224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha3_224(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_224)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_224) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_224);
   }
@@ -1796,7 +1767,7 @@ static void finish_sha3_224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_224) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_224) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_224);
   }
@@ -1806,26 +1777,21 @@ static void finish_sha3_224(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha3_256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha3_256(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_256)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_256) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_256);
   }
@@ -1840,7 +1806,7 @@ static void finish_sha3_256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_256) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_256) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_256);
   }
@@ -1850,26 +1816,21 @@ static void finish_sha3_256(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha3_384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha3_384(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_384)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_384) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_384);
   }
@@ -1884,7 +1845,7 @@ static void finish_sha3_384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_384) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_384) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_384);
   }
@@ -1894,26 +1855,21 @@ static void finish_sha3_384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha3_512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha3_512(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_512)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA3_512) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA3_512);
   }
@@ -1928,7 +1884,7 @@ static void finish_sha3_512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA3_512) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA3_512) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA3_512);
   }
@@ -1938,26 +1894,21 @@ static void finish_sha3_512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *d
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha384(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_384)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_384) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_384);
   }
@@ -1972,7 +1923,7 @@ static void finish_sha384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_384) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_384) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_384);
   }
@@ -1982,26 +1933,21 @@ static void finish_sha384(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_sha512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_sha512(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
   if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_512)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  (total_len % (uint64_t)block_len(Spec_Hash_Definitions_SHA2_512) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_SHA2_512);
   }
@@ -2016,7 +1962,7 @@ static void finish_sha512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_SHA2_512) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_SHA2_512) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_SHA2_512);
   }
@@ -2026,26 +1972,20 @@ static void finish_sha512(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_blake2s(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_blake2s(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_Blake2S)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_Blake2S) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_Blake2S);
   }
@@ -2075,7 +2015,7 @@ static void finish_blake2s(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_Blake2S) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_Blake2S) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_Blake2S);
   }
@@ -2085,26 +2025,20 @@ static void finish_blake2s(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
-static void finish_blake2b(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *dst)
+static void digest_blake2b(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *p;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if
-  (
-    total_len
-    % (uint64_t)block_len(Spec_Hash_Definitions_Blake2B)
-    == (uint64_t)0U
-    && total_len > (uint64_t)0U
-  )
+  if (total_len % (uint64_t)block_len(Spec_Hash_Definitions_Blake2B) == 0ULL && total_len > 0ULL)
   {
     r = block_len(Spec_Hash_Definitions_Blake2B);
   }
@@ -2134,7 +2068,7 @@ static void finish_blake2b(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   copy(block_state, &tmp_block_state);
   uint64_t prev_len = total_len - (uint64_t)r;
   uint32_t ite;
-  if (r % block_len(Spec_Hash_Definitions_Blake2B) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(Spec_Hash_Definitions_Blake2B) == 0U && r > 0U)
   {
     ite = block_len(Spec_Hash_Definitions_Blake2B);
   }
@@ -2144,93 +2078,94 @@ static void finish_blake2b(EverCrypt_Hash_Incremental_hash_state *p, uint8_t *ds
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  update_multi(&tmp_block_state, prev_len, buf_multi, (uint32_t)0U);
+  update_multi(&tmp_block_state, prev_len, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
   update_last(&tmp_block_state, prev_len_last, buf_last, r);
-  finish(&tmp_block_state, dst);
+  finish(&tmp_block_state, output);
 }
 
 /**
 Perform a run-time test to determine which algorithm was chosen for the given piece of state.
 */
 Spec_Hash_Definitions_hash_alg
-EverCrypt_Hash_Incremental_alg_of_state(EverCrypt_Hash_Incremental_hash_state *s)
+EverCrypt_Hash_Incremental_alg_of_state(EverCrypt_Hash_Incremental_state_t *s)
 {
   EverCrypt_Hash_state_s *block_state = (*s).block_state;
   return alg_of_state(block_state);
 }
 
 /**
-Write the resulting hash into `dst`, an array whose length is
+Write the resulting hash into `output`, an array whose length is
 algorithm-specific. You can use the macros defined earlier in this file to
 allocate a destination buffer of the right length. The state remains valid after
-a call to `finish`, meaning the user may feed more data into the hash via
+a call to `digest`, meaning the user may feed more data into the hash via
 `update`. (The finish function operates on an internal copy of the state and
 therefore does not invalidate the client-held state.)
 */
-void EverCrypt_Hash_Incremental_finish(EverCrypt_Hash_Incremental_hash_state *s, uint8_t *dst)
+void
+EverCrypt_Hash_Incremental_digest(EverCrypt_Hash_Incremental_state_t *state, uint8_t *output)
 {
-  Spec_Hash_Definitions_hash_alg a1 = EverCrypt_Hash_Incremental_alg_of_state(s);
+  Spec_Hash_Definitions_hash_alg a1 = EverCrypt_Hash_Incremental_alg_of_state(state);
   switch (a1)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        finish_md5(s, dst);
+        digest_md5(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        finish_sha1(s, dst);
+        digest_sha1(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        finish_sha224(s, dst);
+        digest_sha224(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        finish_sha256(s, dst);
+        digest_sha256(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        finish_sha384(s, dst);
+        digest_sha384(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        finish_sha512(s, dst);
+        digest_sha512(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        finish_sha3_224(s, dst);
+        digest_sha3_224(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        finish_sha3_256(s, dst);
+        digest_sha3_256(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        finish_sha3_384(s, dst);
+        digest_sha3_384(state, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        finish_sha3_512(s, dst);
+        digest_sha3_512(state, output);
         break;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        finish_blake2s(s, dst);
+        digest_blake2s(state, output);
         break;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        finish_blake2b(s, dst);
+        digest_blake2b(state, output);
         break;
       }
     default:
@@ -2244,38 +2179,38 @@ void EverCrypt_Hash_Incremental_finish(EverCrypt_Hash_Incremental_hash_state *s,
 /**
 Free a state previously allocated with `create_in`.
 */
-void EverCrypt_Hash_Incremental_free(EverCrypt_Hash_Incremental_hash_state *s)
+void EverCrypt_Hash_Incremental_free(EverCrypt_Hash_Incremental_state_t *state)
 {
-  EverCrypt_Hash_Incremental_hash_state scrut = *s;
+  EverCrypt_Hash_Incremental_state_t scrut = *state;
   uint8_t *buf = scrut.buf;
   EverCrypt_Hash_state_s *block_state = scrut.block_state;
   free_(block_state);
   KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
+  KRML_HOST_FREE(state);
 }
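A minimal usage sketch for the renamed incremental entry points in this hunk (`alg_of_state`, `digest`, `free`); it assumes a caller that already holds a state allocated and fed with data through the entry points earlier in this file, which lie outside this hunk:

#include "EverCrypt_Hash.h"

/* Sketch only: `st` is assumed to have been allocated and updated elsewhere. */
static void finalize_twice_then_free(EverCrypt_Hash_Incremental_state_t *st)
{
  uint8_t out[64U] = { 0U }; /* 64 bytes covers the largest digests handled above (SHA2-512, Blake2B) */
  Spec_Hash_Definitions_hash_alg a = EverCrypt_Hash_Incremental_alg_of_state(st);
  (void)a;                                    /* could drive the per-algorithm length macros mentioned above */
  EverCrypt_Hash_Incremental_digest(st, out); /* the state remains valid afterwards */
  EverCrypt_Hash_Incremental_digest(st, out); /* so a second digest call is legal */
  EverCrypt_Hash_Incremental_free(st);        /* releases the block state, the buffer, and `st` itself */
}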
 
-void EverCrypt_Hash_Incremental_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void EverCrypt_Hash_Incremental_hash_256(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
-    uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
+    uint32_t x = Hacl_Hash_SHA2_h256[i];
     os[i] = x;);
   uint32_t *s = st;
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -2285,35 +2220,35 @@ void EverCrypt_Hash_Incremental_hash_256(uint8_t *input, uint32_t input_len, uin
   uint32_t rest_len = rest_len0;
   uint8_t *rest = rest0;
   EverCrypt_Hash_update_multi_256(s, blocks, blocks_n);
-  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)blocks_len + (uint64_t)rest_len,
+  Hacl_Hash_SHA2_sha256_update_last((uint64_t)blocks_len + (uint64_t)rest_len,
     rest_len,
     rest,
     s);
-  Hacl_SHA2_Scalar32_sha256_finish(s, dst);
+  Hacl_Hash_SHA2_sha256_finish(s, output);
 }
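For reference, a one-shot sketch against the reordered `EverCrypt_Hash_Incremental_hash_256` signature above; the message contents are placeholders:

#include "EverCrypt_Hash.h"

static void sha256_oneshot_example(void)
{
  uint8_t msg[3U] = { 0x61U, 0x62U, 0x63U }; /* "abc" */
  uint8_t digest[32U] = { 0U };              /* SHA-256 emits 32 bytes */
  EverCrypt_Hash_Incremental_hash_256(digest, msg, 3U); /* output-first argument order */
}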
 
-static void hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst)
+static void hash_224(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
-    uint32_t x = Hacl_Impl_SHA2_Generic_h224[i];
+    uint32_t x = Hacl_Hash_SHA2_h224[i];
     os[i] = x;);
   uint32_t *s = st;
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -2323,15 +2258,15 @@ static void hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst)
   uint32_t rest_len = rest_len0;
   uint8_t *rest = rest0;
   EverCrypt_Hash_update_multi_256(s, blocks, blocks_n);
-  Hacl_SHA2_Scalar32_sha224_update_last((uint64_t)blocks_len + (uint64_t)rest_len,
+  Hacl_Hash_SHA2_sha224_update_last((uint64_t)blocks_len + (uint64_t)rest_len,
     rest_len,
     rest,
     s);
-  Hacl_SHA2_Scalar32_sha224_finish(s, dst);
+  Hacl_Hash_SHA2_sha224_finish(s, output);
 }
 
 /**
-Hash `input`, of len `len`, into `dst`, an array whose length is determined by
+Hash `input`, of length `input_len`, into `output`, an array whose length is determined by
 your choice of algorithm `a` (see Hacl_Spec.h). You can use the macros defined
 earlier in this file to allocate a destination buffer of the right length. This
 API will automatically pick the most efficient implementation, provided you have
@@ -2340,61 +2275,61 @@ called EverCrypt_AutoConfig2_init() before.
 void
 EverCrypt_Hash_Incremental_hash(
   Spec_Hash_Definitions_hash_alg a,
-  uint8_t *dst,
+  uint8_t *output,
   uint8_t *input,
-  uint32_t len
+  uint32_t input_len
 )
 {
   switch (a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        Hacl_Hash_MD5_legacy_hash(input, len, dst);
+        Hacl_Hash_MD5_hash_oneshot(output, input, input_len);
         break;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        Hacl_Hash_SHA1_legacy_hash(input, len, dst);
+        Hacl_Hash_SHA1_hash_oneshot(output, input, input_len);
         break;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        hash_224(input, len, dst);
+        hash_224(output, input, input_len);
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        EverCrypt_Hash_Incremental_hash_256(input, len, dst);
+        EverCrypt_Hash_Incremental_hash_256(output, input, input_len);
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        Hacl_Streaming_SHA2_hash_384(input, len, dst);
+        Hacl_Hash_SHA2_hash_384(output, input, input_len);
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        Hacl_Streaming_SHA2_hash_512(input, len, dst);
+        Hacl_Hash_SHA2_hash_512(output, input, input_len);
         break;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        Hacl_SHA3_sha3_224(len, input, dst);
+        Hacl_Hash_SHA3_sha3_224(input_len, input, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        Hacl_SHA3_sha3_256(len, input, dst);
+        Hacl_Hash_SHA3_sha3_256(input_len, input, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        Hacl_SHA3_sha3_384(len, input, dst);
+        Hacl_Hash_SHA3_sha3_384(input_len, input, output);
         break;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        Hacl_SHA3_sha3_512(len, input, dst);
+        Hacl_Hash_SHA3_sha3_512(input_len, input, output);
         break;
       }
     case Spec_Hash_Definitions_Blake2S:
@@ -2403,12 +2338,12 @@ EverCrypt_Hash_Incremental_hash(
         bool vec128 = EverCrypt_AutoConfig2_has_vec128();
         if (vec128)
         {
-          Hacl_Blake2s_128_blake2s((uint32_t)32U, dst, len, input, (uint32_t)0U, NULL);
+          Hacl_Hash_Blake2s_Simd128_hash_with_key(output, 32U, input, input_len, NULL, 0U);
           return;
         }
-        Hacl_Blake2s_32_blake2s((uint32_t)32U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Hash_Blake2s_hash_with_key(output, 32U, input, input_len, NULL, 0U);
         #else
-        Hacl_Blake2s_32_blake2s((uint32_t)32U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Hash_Blake2s_hash_with_key(output, 32U, input, input_len, NULL, 0U);
         #endif
         break;
       }
@@ -2418,12 +2353,12 @@ EverCrypt_Hash_Incremental_hash(
         bool vec256 = EverCrypt_AutoConfig2_has_vec256();
         if (vec256)
         {
-          Hacl_Blake2b_256_blake2b((uint32_t)64U, dst, len, input, (uint32_t)0U, NULL);
+          Hacl_Hash_Blake2b_Simd256_hash_with_key(output, 64U, input, input_len, NULL, 0U);
           return;
         }
-        Hacl_Blake2b_32_blake2b((uint32_t)64U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Hash_Blake2b_hash_with_key(output, 64U, input, input_len, NULL, 0U);
         #else
-        Hacl_Blake2b_32_blake2b((uint32_t)64U, dst, len, input, (uint32_t)0U, NULL);
+        Hacl_Hash_Blake2b_hash_with_key(output, 64U, input, input_len, NULL, 0U);
         #endif
         break;
       }
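A sketch of the algorithm-agile entry point after the renames above, assuming `EverCrypt_AutoConfig2_init()` has the usual zero-argument prototype referenced in the doc comment; the 64-byte Blake2B output length is taken from the calls in this hunk:

#include "EverCrypt_AutoConfig2.h"
#include "EverCrypt_Hash.h"

static void blake2b_oneshot_example(uint8_t *msg, uint32_t msg_len)
{
  uint8_t digest[64U] = { 0U };  /* Blake2B output length used above is 64 bytes */
  EverCrypt_AutoConfig2_init();  /* lets has_vec128/has_vec256 select a SIMD path */
  EverCrypt_Hash_Incremental_hash(Spec_Hash_Definitions_Blake2B, digest, msg, msg_len);
}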
diff --git a/src/msvc/EverCrypt_Poly1305.c b/src/msvc/EverCrypt_Poly1305.c
index 454c0fce..33ee20f3 100644
--- a/src/msvc/EverCrypt_Poly1305.c
+++ b/src/msvc/EverCrypt_Poly1305.c
@@ -31,60 +31,60 @@
 KRML_MAYBE_UNUSED static void
 poly1305_vale(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key)
 {
-  KRML_HOST_IGNORE(dst);
-  KRML_HOST_IGNORE(src);
-  KRML_HOST_IGNORE(len);
-  KRML_HOST_IGNORE(key);
+  KRML_MAYBE_UNUSED_VAR(dst);
+  KRML_MAYBE_UNUSED_VAR(src);
+  KRML_MAYBE_UNUSED_VAR(len);
+  KRML_MAYBE_UNUSED_VAR(key);
   #if HACL_CAN_COMPILE_VALE
   uint8_t ctx[192U] = { 0U };
-  memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t));
-  uint32_t n_blocks = len / (uint32_t)16U;
-  uint32_t n_extra = len % (uint32_t)16U;
+  memcpy(ctx + 24U, key, 32U * sizeof (uint8_t));
+  uint32_t n_blocks = len / 16U;
+  uint32_t n_extra = len % 16U;
   uint8_t tmp[16U] = { 0U };
-  if (n_extra == (uint32_t)0U)
+  if (n_extra == 0U)
   {
-    KRML_HOST_IGNORE(x64_poly1305(ctx, src, (uint64_t)len, (uint64_t)1U));
+    x64_poly1305(ctx, src, (uint64_t)len, 1ULL);
   }
   else
   {
-    uint32_t len16 = n_blocks * (uint32_t)16U;
+    uint32_t len16 = n_blocks * 16U;
     uint8_t *src16 = src;
     memcpy(tmp, src + len16, n_extra * sizeof (uint8_t));
-    KRML_HOST_IGNORE(x64_poly1305(ctx, src16, (uint64_t)len16, (uint64_t)0U));
-    memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t));
-    KRML_HOST_IGNORE(x64_poly1305(ctx, tmp, (uint64_t)n_extra, (uint64_t)1U));
+    x64_poly1305(ctx, src16, (uint64_t)len16, 0ULL);
+    memcpy(ctx + 24U, key, 32U * sizeof (uint8_t));
+    x64_poly1305(ctx, tmp, (uint64_t)n_extra, 1ULL);
   }
-  memcpy(dst, ctx, (uint32_t)16U * sizeof (uint8_t));
+  memcpy(dst, ctx, 16U * sizeof (uint8_t));
   #endif
 }
 
-void EverCrypt_Poly1305_poly1305(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key)
+void EverCrypt_Poly1305_mac(uint8_t *output, uint8_t *input, uint32_t input_len, uint8_t *key)
 {
   bool vec256 = EverCrypt_AutoConfig2_has_vec256();
   bool vec128 = EverCrypt_AutoConfig2_has_vec128();
   #if HACL_CAN_COMPILE_VEC256
   if (vec256)
   {
-    KRML_HOST_IGNORE(vec128);
-    Hacl_Poly1305_256_poly1305_mac(dst, len, src, key);
+    KRML_MAYBE_UNUSED_VAR(vec128);
+    Hacl_MAC_Poly1305_Simd256_mac(output, input, input_len, key);
     return;
   }
   #endif
   #if HACL_CAN_COMPILE_VEC128
   if (vec128)
   {
-    KRML_HOST_IGNORE(vec256);
-    Hacl_Poly1305_128_poly1305_mac(dst, len, src, key);
+    KRML_MAYBE_UNUSED_VAR(vec256);
+    Hacl_MAC_Poly1305_Simd128_mac(output, input, input_len, key);
     return;
   }
   #endif
-  KRML_HOST_IGNORE(vec256);
-  KRML_HOST_IGNORE(vec128);
+  KRML_MAYBE_UNUSED_VAR(vec256);
+  KRML_MAYBE_UNUSED_VAR(vec128);
   #if HACL_CAN_COMPILE_VALE
-  poly1305_vale(dst, src, len, key);
+  poly1305_vale(output, input, input_len, key);
   #else
   KRML_HOST_IGNORE(poly1305_vale);
-  Hacl_Poly1305_32_poly1305_mac(dst, len, src, key);
+  Hacl_MAC_Poly1305_mac(output, input, input_len, key);
   #endif
 }
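A usage sketch for the renamed `EverCrypt_Poly1305_mac`; the 32-byte key and 16-byte tag lengths are taken from the Vale path above, and the `AutoConfig2` call is an assumption carried over from the hash doc comment:

#include "EverCrypt_AutoConfig2.h"
#include "EverCrypt_Poly1305.h"

static void poly1305_mac_example(uint8_t *msg, uint32_t msg_len, uint8_t *key /* 32 bytes */)
{
  uint8_t tag[16U] = { 0U };     /* Poly1305 emits a 16-byte tag */
  EverCrypt_AutoConfig2_init();  /* so the vec256/vec128/Vale checks above see the real CPU */
  EverCrypt_Poly1305_mac(tag, msg, msg_len, key);
}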
 
diff --git a/src/msvc/Hacl_Chacha20Poly1305_32.c b/src/msvc/Hacl_AEAD_Chacha20Poly1305.c
similarity index 70%
rename from src/msvc/Hacl_Chacha20Poly1305_32.c
rename to src/msvc/Hacl_AEAD_Chacha20Poly1305.c
index 179af485..310c84fc 100644
--- a/src/msvc/Hacl_Chacha20Poly1305_32.c
+++ b/src/msvc/Hacl_AEAD_Chacha20Poly1305.c
@@ -23,35 +23,36 @@
  */
 
 
-#include "Hacl_Chacha20Poly1305_32.h"
+#include "Hacl_AEAD_Chacha20Poly1305.h"
 
+#include "internal/Hacl_MAC_Poly1305.h"
 #include "internal/Hacl_Krmllib.h"
 
 static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text)
 {
-  uint32_t n = len / (uint32_t)16U;
-  uint32_t r = len % (uint32_t)16U;
+  uint32_t n = len / 16U;
+  uint32_t r = len % 16U;
   uint8_t *blocks = text;
-  uint8_t *rem = text + n * (uint32_t)16U;
-  uint64_t *pre0 = ctx + (uint32_t)5U;
+  uint8_t *rem = text + n * 16U;
+  uint64_t *pre0 = ctx + 5U;
   uint64_t *acc0 = ctx;
-  uint32_t nb = n * (uint32_t)16U / (uint32_t)16U;
-  uint32_t rem1 = n * (uint32_t)16U % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = n * 16U / 16U;
+  uint32_t rem1 = n * 16U % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = blocks + i * (uint32_t)16U;
+    uint8_t *block = blocks + i * 16U;
     uint64_t e[5U] = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -62,12 +63,12 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     uint64_t mask = b;
     uint64_t f4 = e[4U];
     e[4U] = f4 | mask;
     uint64_t *r1 = pre0;
-    uint64_t *r5 = pre0 + (uint32_t)5U;
+    uint64_t *r5 = pre0 + 5U;
     uint64_t r0 = r1[0U];
     uint64_t r11 = r1[1U];
     uint64_t r2 = r1[2U];
@@ -122,28 +123,28 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -157,23 +158,23 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     acc0[3U] = o3;
     acc0[4U] = o4;
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *last = blocks + nb * (uint32_t)16U;
+    uint8_t *last = blocks + nb * 16U;
     uint64_t e[5U] = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem1 * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -184,12 +185,12 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem1 * 8U % 26U;
     uint64_t mask = b;
-    uint64_t fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
-    e[rem1 * (uint32_t)8U / (uint32_t)26U] = fi | mask;
+    uint64_t fi = e[rem1 * 8U / 26U];
+    e[rem1 * 8U / 26U] = fi | mask;
     uint64_t *r1 = pre0;
-    uint64_t *r5 = pre0 + (uint32_t)5U;
+    uint64_t *r5 = pre0 + 5U;
     uint64_t r0 = r1[0U];
     uint64_t r11 = r1[1U];
     uint64_t r2 = r1[2U];
@@ -244,28 +245,28 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -281,22 +282,22 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
   }
   uint8_t tmp[16U] = { 0U };
   memcpy(tmp, rem, r * sizeof (uint8_t));
-  if (r > (uint32_t)0U)
+  if (r > 0U)
   {
-    uint64_t *pre = ctx + (uint32_t)5U;
+    uint64_t *pre = ctx + 5U;
     uint64_t *acc = ctx;
     uint64_t e[5U] = { 0U };
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     uint64_t f0 = lo;
     uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
     uint64_t f01 = f010;
     uint64_t f111 = f110;
     uint64_t f2 = f20;
@@ -307,12 +308,12 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     uint64_t mask = b;
     uint64_t f4 = e[4U];
     e[4U] = f4 | mask;
     uint64_t *r1 = pre;
-    uint64_t *r5 = pre + (uint32_t)5U;
+    uint64_t *r5 = pre + 5U;
     uint64_t r0 = r1[0U];
     uint64_t r11 = r1[1U];
     uint64_t r2 = r1[2U];
@@ -367,28 +368,28 @@ static inline void poly1305_padded_32(uint64_t *ctx, uint32_t len, uint8_t *text
     uint64_t t2 = a26;
     uint64_t t3 = a36;
     uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
     uint64_t x0 = t0 & mask26;
     uint64_t x3 = t3 & mask26;
     uint64_t x1 = t1 + z0;
     uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
     uint64_t z12 = z11 + t;
     uint64_t x11 = x1 & mask26;
     uint64_t x41 = x4 & mask26;
     uint64_t x2 = t2 + z01;
     uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
     uint64_t x21 = x2 & mask26;
     uint64_t x02 = x01 & mask26;
     uint64_t x31 = x3 + z02;
     uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
+    uint64_t z03 = x31 >> 26U;
     uint64_t x32 = x31 & mask26;
     uint64_t x42 = x41 + z03;
     uint64_t o0 = x02;
@@ -417,31 +418,31 @@ poly1305_do_32(
 {
   uint64_t ctx[25U] = { 0U };
   uint8_t block[16U] = { 0U };
-  Hacl_Poly1305_32_poly1305_init(ctx, k);
-  if (aadlen != (uint32_t)0U)
+  Hacl_MAC_Poly1305_poly1305_init(ctx, k);
+  if (aadlen != 0U)
   {
     poly1305_padded_32(ctx, aadlen, aad);
   }
-  if (mlen != (uint32_t)0U)
+  if (mlen != 0U)
   {
     poly1305_padded_32(ctx, mlen, m);
   }
   store64_le(block, (uint64_t)aadlen);
-  store64_le(block + (uint32_t)8U, (uint64_t)mlen);
-  uint64_t *pre = ctx + (uint32_t)5U;
+  store64_le(block + 8U, (uint64_t)mlen);
+  uint64_t *pre = ctx + 5U;
   uint64_t *acc = ctx;
   uint64_t e[5U] = { 0U };
   uint64_t u0 = load64_le(block);
   uint64_t lo = u0;
-  uint64_t u = load64_le(block + (uint32_t)8U);
+  uint64_t u = load64_le(block + 8U);
   uint64_t hi = u;
   uint64_t f0 = lo;
   uint64_t f1 = hi;
-  uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-  uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-  uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-  uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-  uint64_t f40 = f1 >> (uint32_t)40U;
+  uint64_t f010 = f0 & 0x3ffffffULL;
+  uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+  uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+  uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+  uint64_t f40 = f1 >> 40U;
   uint64_t f01 = f010;
   uint64_t f111 = f110;
   uint64_t f2 = f20;
@@ -452,12 +453,12 @@ poly1305_do_32(
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   uint64_t mask = b;
   uint64_t f4 = e[4U];
   e[4U] = f4 | mask;
   uint64_t *r = pre;
-  uint64_t *r5 = pre + (uint32_t)5U;
+  uint64_t *r5 = pre + 5U;
   uint64_t r0 = r[0U];
   uint64_t r1 = r[1U];
   uint64_t r2 = r[2U];
@@ -512,28 +513,28 @@ poly1305_do_32(
   uint64_t t2 = a26;
   uint64_t t3 = a36;
   uint64_t t4 = a46;
-  uint64_t mask26 = (uint64_t)0x3ffffffU;
-  uint64_t z0 = t0 >> (uint32_t)26U;
-  uint64_t z1 = t3 >> (uint32_t)26U;
+  uint64_t mask26 = 0x3ffffffULL;
+  uint64_t z0 = t0 >> 26U;
+  uint64_t z1 = t3 >> 26U;
   uint64_t x0 = t0 & mask26;
   uint64_t x3 = t3 & mask26;
   uint64_t x1 = t1 + z0;
   uint64_t x4 = t4 + z1;
-  uint64_t z01 = x1 >> (uint32_t)26U;
-  uint64_t z11 = x4 >> (uint32_t)26U;
-  uint64_t t = z11 << (uint32_t)2U;
+  uint64_t z01 = x1 >> 26U;
+  uint64_t z11 = x4 >> 26U;
+  uint64_t t = z11 << 2U;
   uint64_t z12 = z11 + t;
   uint64_t x11 = x1 & mask26;
   uint64_t x41 = x4 & mask26;
   uint64_t x2 = t2 + z01;
   uint64_t x01 = x0 + z12;
-  uint64_t z02 = x2 >> (uint32_t)26U;
-  uint64_t z13 = x01 >> (uint32_t)26U;
+  uint64_t z02 = x2 >> 26U;
+  uint64_t z13 = x01 >> 26U;
   uint64_t x21 = x2 & mask26;
   uint64_t x02 = x01 & mask26;
   uint64_t x31 = x3 + z02;
   uint64_t x12 = x11 + z13;
-  uint64_t z03 = x31 >> (uint32_t)26U;
+  uint64_t z03 = x31 >> 26U;
   uint64_t x32 = x31 & mask26;
   uint64_t x42 = x41 + z03;
   uint64_t o0 = x02;
@@ -546,42 +547,41 @@ poly1305_do_32(
   acc[2U] = o2;
   acc[3U] = o3;
   acc[4U] = o4;
-  Hacl_Poly1305_32_poly1305_finish(out, k, ctx);
+  Hacl_MAC_Poly1305_poly1305_finish(out, k, ctx);
 }
 
 /**
-Encrypt a message `m` with key `k`.
+Encrypt a message `input` with key `key`.
 
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
+The arguments `key`, `nonce`, `data`, and `data_len` are the same for encryption and decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
 
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the message.
-@param m Pointer to `mlen` bytes of memory where the message is read from.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is written to.
-@param mac Pointer to 16 bytes of memory where the mac is written to.
+@param output Pointer to `input_len` bytes of memory where the ciphertext is written to.
+@param tag Pointer to 16 bytes of memory where the mac is written to.
+@param input Pointer to `input_len` bytes of memory where the message is read from.
+@param input_len Length of the message.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
 */
 void
-Hacl_Chacha20Poly1305_32_aead_encrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_encrypt(
+  uint8_t *output,
+  uint8_t *tag,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce
 )
 {
-  Hacl_Chacha20_chacha20_encrypt(mlen, cipher, m, k, n, (uint32_t)1U);
+  Hacl_Chacha20_chacha20_encrypt(input_len, output, input, key, nonce, 1U);
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
-  uint8_t *key = tmp;
-  poly1305_do_32(key, aadlen, aad, mlen, cipher, mac);
+  Hacl_Chacha20_chacha20_encrypt(64U, tmp, tmp, key, nonce, 0U);
+  uint8_t *key1 = tmp;
+  poly1305_do_32(key1, data_len, data, input_len, output, tag);
 }
 
 /**
@@ -606,35 +606,35 @@ If decryption fails, the array `m` remains unchanged and the function returns th
 @returns 0 on success; 1 on failure.
 */
 uint32_t
-Hacl_Chacha20Poly1305_32_aead_decrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_decrypt(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce,
+  uint8_t *tag
 )
 {
-  uint8_t computed_mac[16U] = { 0U };
+  uint8_t computed_tag[16U] = { 0U };
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_chacha20_encrypt((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
-  uint8_t *key = tmp;
-  poly1305_do_32(key, aadlen, aad, mlen, cipher, computed_mac);
-  uint8_t res = (uint8_t)255U;
+  Hacl_Chacha20_chacha20_encrypt(64U, tmp, tmp, key, nonce, 0U);
+  uint8_t *key1 = tmp;
+  poly1305_do_32(key1, data_len, data, input_len, input, computed_tag);
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
-    res = uu____0 & res;);
+    0U,
+    16U,
+    1U,
+    uint8_t uu____0 = FStar_UInt8_eq_mask(computed_tag[i], tag[i]);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
-    Hacl_Chacha20_chacha20_encrypt(mlen, m, cipher, k, n, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Chacha20_chacha20_encrypt(input_len, output, input, key, nonce, 1U);
+    return 0U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
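A round-trip sketch against the new `Hacl_AEAD_Chacha20Poly1305_encrypt`/`_decrypt` signatures above; buffer sizes follow the doc comments (32-byte key, 12-byte nonce, 16-byte tag), and key/nonce/aad are caller-provided placeholders:

#include "Hacl_AEAD_Chacha20Poly1305.h"

static uint32_t
aead_roundtrip(uint8_t *msg, uint32_t msg_len, uint8_t *aad, uint32_t aad_len,
               uint8_t *key /* 32 bytes */, uint8_t *nonce /* 12 bytes */,
               uint8_t *cipher, uint8_t *decrypted)
{
  uint8_t tag[16U] = { 0U };
  Hacl_AEAD_Chacha20Poly1305_encrypt(cipher, tag, msg, msg_len, aad, aad_len, key, nonce);
  /* Returns 0 when the tag verifies; on failure, `decrypted` is left untouched. */
  return Hacl_AEAD_Chacha20Poly1305_decrypt(decrypted, cipher, msg_len, aad, aad_len, key, nonce, tag);
}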
 
diff --git a/src/msvc/Hacl_Chacha20Poly1305_128.c b/src/msvc/Hacl_AEAD_Chacha20Poly1305_Simd128.c
similarity index 77%
rename from src/msvc/Hacl_Chacha20Poly1305_128.c
rename to src/msvc/Hacl_AEAD_Chacha20Poly1305_Simd128.c
index 4cf2eae9..0cfa41fd 100644
--- a/src/msvc/Hacl_Chacha20Poly1305_128.c
+++ b/src/msvc/Hacl_AEAD_Chacha20Poly1305_Simd128.c
@@ -23,65 +23,60 @@
  */
 
 
-#include "Hacl_Chacha20Poly1305_128.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"
 
-#include "internal/Hacl_Poly1305_128.h"
+#include "internal/Hacl_MAC_Poly1305_Simd128.h"
 #include "internal/Hacl_Krmllib.h"
 #include "libintvector.h"
 
 static inline void
 poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t *text)
 {
-  uint32_t n = len / (uint32_t)16U;
-  uint32_t r = len % (uint32_t)16U;
+  uint32_t n = len / 16U;
+  uint32_t r = len % 16U;
   uint8_t *blocks = text;
-  uint8_t *rem = text + n * (uint32_t)16U;
-  Lib_IntVector_Intrinsics_vec128 *pre0 = ctx + (uint32_t)5U;
+  uint8_t *rem = text + n * 16U;
+  Lib_IntVector_Intrinsics_vec128 *pre0 = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc0 = ctx;
-  uint32_t sz_block = (uint32_t)32U;
-  uint32_t len0 = n * (uint32_t)16U / sz_block * sz_block;
+  uint32_t sz_block = 32U;
+  uint32_t len0 = n * 16U / sz_block * sz_block;
   uint8_t *t00 = blocks;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)32U;
+    uint32_t bs = 32U;
     uint8_t *text0 = t00;
-    Hacl_Impl_Poly1305_Field32xN_128_load_acc2(acc0, text0);
+    Hacl_MAC_Poly1305_Simd128_load_acc2(acc0, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t00 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
       Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(block);
-      Lib_IntVector_Intrinsics_vec128
-      b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + (uint32_t)16U);
+      Lib_IntVector_Intrinsics_vec128 b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + 16U);
       Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       f00 =
         Lib_IntVector_Intrinsics_vec128_and(lo,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f15 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)26U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 26U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f25 =
-        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)52U),
+        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 52U),
           Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi,
-              Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-            (uint32_t)12U));
+              Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+            12U));
       Lib_IntVector_Intrinsics_vec128
       f30 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi,
-            (uint32_t)14U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-      Lib_IntVector_Intrinsics_vec128
-      f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U);
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 14U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+      Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 40U);
       Lib_IntVector_Intrinsics_vec128 f0 = f00;
       Lib_IntVector_Intrinsics_vec128 f1 = f15;
       Lib_IntVector_Intrinsics_vec128 f2 = f25;
@@ -92,12 +87,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
       e[2U] = f2;
       e[3U] = f3;
       e[4U] = f41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
       Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec128 *rn = pre0 + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec128 *rn5 = pre0 + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec128 *rn = pre0 + 10U;
+      Lib_IntVector_Intrinsics_vec128 *rn5 = pre0 + 15U;
       Lib_IntVector_Intrinsics_vec128 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec128 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec128 r2 = rn[2U];
@@ -202,37 +197,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
       Lib_IntVector_Intrinsics_vec128 t2 = a24;
       Lib_IntVector_Intrinsics_vec128 t3 = a34;
       Lib_IntVector_Intrinsics_vec128 t4 = a44;
-      Lib_IntVector_Intrinsics_vec128
-      mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec128
-      z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec128
-      z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
       Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec128
-      z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec128
-      z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec128 o00 = x02;
@@ -266,45 +252,41 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
       acc0[3U] = o3;
       acc0[4U] = o4;
     }
-    Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(acc0, pre0);
+    Hacl_MAC_Poly1305_Simd128_fmul_r2_normalize(acc0, pre0);
   }
-  uint32_t len1 = n * (uint32_t)16U - len0;
+  uint32_t len1 = n * 16U - len0;
   uint8_t *t10 = blocks + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem1 = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem1 = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t10 + i * (uint32_t)16U;
+    uint8_t *block = t10 + i * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -315,12 +297,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
     Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
     Lib_IntVector_Intrinsics_vec128 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec128 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r1[2U];
@@ -435,37 +417,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -479,41 +452,37 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     acc0[3U] = o3;
     acc0[4U] = o4;
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *last = t10 + nb * (uint32_t)16U;
+    uint8_t *last = t10 + nb * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem1 * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -524,12 +493,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem1 * 8U % 26U;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
-    Lib_IntVector_Intrinsics_vec128 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
-    e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec128 fi = e[rem1 * 8U / 26U];
+    e[rem1 * 8U / 26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
     Lib_IntVector_Intrinsics_vec128 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec128 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r1[2U];
@@ -644,37 +613,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -690,40 +650,36 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
   }
   uint8_t tmp[16U] = { 0U };
   memcpy(tmp, rem, r * sizeof (uint8_t));
-  if (r > (uint32_t)0U)
+  if (r > 0U)
   {
-    Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
     Lib_IntVector_Intrinsics_vec128 *acc = ctx;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -734,12 +690,12 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
     Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
     Lib_IntVector_Intrinsics_vec128 *r1 = pre;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec128 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r1[2U];
@@ -854,37 +810,28 @@ poly1305_padded_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -913,49 +860,45 @@ poly1305_do_128(
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[25U] KRML_POST_ALIGN(16) = { 0U };
   uint8_t block[16U] = { 0U };
-  Hacl_Poly1305_128_poly1305_init(ctx, k);
-  if (aadlen != (uint32_t)0U)
+  Hacl_MAC_Poly1305_Simd128_poly1305_init(ctx, k);
+  if (aadlen != 0U)
   {
     poly1305_padded_128(ctx, aadlen, aad);
   }
-  if (mlen != (uint32_t)0U)
+  if (mlen != 0U)
   {
     poly1305_padded_128(ctx, mlen, m);
   }
   store64_le(block, (uint64_t)aadlen);
-  store64_le(block + (uint32_t)8U, (uint64_t)mlen);
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  store64_le(block + 8U, (uint64_t)mlen);
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
   uint64_t u0 = load64_le(block);
   uint64_t lo = u0;
-  uint64_t u = load64_le(block + (uint32_t)8U);
+  uint64_t u = load64_le(block + 8U);
   uint64_t hi = u;
   Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
   Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
   Lib_IntVector_Intrinsics_vec128
   f010 =
     Lib_IntVector_Intrinsics_vec128_and(f0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f110 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
   Lib_IntVector_Intrinsics_vec128 f01 = f010;
   Lib_IntVector_Intrinsics_vec128 f111 = f110;
   Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -966,12 +909,12 @@ poly1305_do_128(
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
   Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
   Lib_IntVector_Intrinsics_vec128 *r = pre;
-  Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
   Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -1086,37 +1029,28 @@ poly1305_do_128(
   Lib_IntVector_Intrinsics_vec128 t2 = a26;
   Lib_IntVector_Intrinsics_vec128 t3 = a36;
   Lib_IntVector_Intrinsics_vec128 t4 = a46;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -1129,95 +1063,93 @@ poly1305_do_128(
   acc[2U] = o2;
   acc[3U] = o3;
   acc[4U] = o4;
-  Hacl_Poly1305_128_poly1305_finish(out, k, ctx);
+  Hacl_MAC_Poly1305_Simd128_poly1305_finish(out, k, ctx);
 }
 
 /**
-Encrypt a message `m` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
+Encrypt a message `input` with key `key`.
 
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
 
-@param mlen Length of the message.
-@param m Pointer to `mlen` bytes of memory where the message is read from.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is written to.
-@param mac Pointer to 16 bytes of memory where the mac is written to.
+@param output Pointer to `input_len` bytes of memory where the ciphertext is written to.
+@param tag Pointer to 16 bytes of memory where the mac is written to.
+@param input Pointer to `input_len` bytes of memory where the message is read from.
+@param input_len Length of the message.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
 */
 void
-Hacl_Chacha20Poly1305_128_aead_encrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(
+  uint8_t *output,
+  uint8_t *tag,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce
 )
 {
-  Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, cipher, m, k, n, (uint32_t)1U);
+  Hacl_Chacha20_Vec128_chacha20_encrypt_128(input_len, output, input, key, nonce, 1U);
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
-  uint8_t *key = tmp;
-  poly1305_do_128(key, aadlen, aad, mlen, cipher, mac);
+  Hacl_Chacha20_Vec128_chacha20_encrypt_128(64U, tmp, tmp, key, nonce, 0U);
+  uint8_t *key1 = tmp;
+  poly1305_do_128(key1, data_len, data, input_len, output, tag);
 }
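
For reference, a minimal caller sketch for the renamed encrypt entry point. This is illustrative only and not part of the patch; the header name Hacl_AEAD_Chacha20Poly1305_Simd128.h is assumed from the rename in this change set, and the buffer sizes follow the doc comment above (32-byte key, 12-byte nonce, 16-byte tag).

#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"

void demo_encrypt(void)
{
  uint8_t key[32U] = { 0U };    /* 32-byte AEAD key (use real key material in practice) */
  uint8_t nonce[12U] = { 0U };  /* 12-byte nonce, must be unique per key */
  uint8_t aad[8U] = { 0U };     /* associated data */
  uint8_t msg[32U] = { 0U };    /* plaintext */
  uint8_t cipher[32U] = { 0U }; /* ciphertext, same length as the plaintext */
  uint8_t tag[16U] = { 0U };    /* 16-byte authentication tag */
  /* argument order: output, tag, input, input_len, data, data_len, key, nonce */
  Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher, tag, msg, 32U, aad, 8U, key, nonce);
}
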
 
 /**
-Decrypt a ciphertext `cipher` with key `k`.
-
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
+Decrypt a ciphertext `input` with key `key`.
 
-If decryption succeeds, the resulting plaintext is stored in `m` and the function returns the success code 0.
-If decryption fails, the array `m` remains unchanged and the function returns the error code 1.
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
 
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
+If decryption succeeds, the resulting plaintext is stored in `output` and the function returns the success code 0.
+If decryption fails, the array `output` remains unchanged and the function returns the error code 1.
 
-@param mlen Length of the ciphertext.
-@param m Pointer to `mlen` bytes of memory where the message is written to.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is read from.
-@param mac Pointer to 16 bytes of memory where the mac is read from.
+@param output Pointer to `input_len` bytes of memory where the message is written to.
+@param input Pointer to `input_len` bytes of memory where the ciphertext is read from.
+@param input_len Length of the ciphertext.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
+@param tag Pointer to 16 bytes of memory where the mac is read from.
 
 @returns 0 on success; 1 on failure.
 */
 uint32_t
-Hacl_Chacha20Poly1305_128_aead_decrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce,
+  uint8_t *tag
 )
 {
-  uint8_t computed_mac[16U] = { 0U };
+  uint8_t computed_tag[16U] = { 0U };
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec128_chacha20_encrypt_128((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
-  uint8_t *key = tmp;
-  poly1305_do_128(key, aadlen, aad, mlen, cipher, computed_mac);
-  uint8_t res = (uint8_t)255U;
+  Hacl_Chacha20_Vec128_chacha20_encrypt_128(64U, tmp, tmp, key, nonce, 0U);
+  uint8_t *key1 = tmp;
+  poly1305_do_128(key1, data_len, data, input_len, input, computed_tag);
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
-    res = uu____0 & res;);
+    0U,
+    16U,
+    1U,
+    uint8_t uu____0 = FStar_UInt8_eq_mask(computed_tag[i], tag[i]);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
-    Hacl_Chacha20_Vec128_chacha20_encrypt_128(mlen, m, cipher, k, n, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Chacha20_Vec128_chacha20_encrypt_128(input_len, output, input, key, nonce, 1U);
+    return 0U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
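
And the matching decrypt call, again only an illustrative sketch under the same header-name assumption; per the doc comment it returns 0 when the tag verifies (and `plain` is filled) and 1 otherwise.

#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"

/* Returns 0 if `cipher`/`tag` authenticate and `plain` was written, 1 otherwise. */
uint32_t demo_decrypt(uint8_t *plain, uint8_t *cipher, uint32_t len,
                      uint8_t *aad, uint32_t aad_len,
                      uint8_t *key, uint8_t *nonce, uint8_t *tag)
{
  /* argument order: output, input, input_len, data, data_len, key, nonce, tag */
  return Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(plain, cipher, len,
                                                    aad, aad_len, key, nonce, tag);
}
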
 
diff --git a/src/Hacl_Chacha20Poly1305_256.c b/src/msvc/Hacl_AEAD_Chacha20Poly1305_Simd256.c
similarity index 77%
rename from src/Hacl_Chacha20Poly1305_256.c
rename to src/msvc/Hacl_AEAD_Chacha20Poly1305_Simd256.c
index c3dfec03..28414516 100644
--- a/src/Hacl_Chacha20Poly1305_256.c
+++ b/src/msvc/Hacl_AEAD_Chacha20Poly1305_Simd256.c
@@ -23,67 +23,61 @@
  */
 
 
-#include "Hacl_Chacha20Poly1305_256.h"
+#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"
 
-#include "internal/Hacl_Poly1305_256.h"
+#include "internal/Hacl_MAC_Poly1305_Simd256.h"
 #include "internal/Hacl_Krmllib.h"
 #include "libintvector.h"
 
 static inline void
 poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t *text)
 {
-  uint32_t n = len / (uint32_t)16U;
-  uint32_t r = len % (uint32_t)16U;
+  uint32_t n = len / 16U;
+  uint32_t r = len % 16U;
   uint8_t *blocks = text;
-  uint8_t *rem = text + n * (uint32_t)16U;
-  Lib_IntVector_Intrinsics_vec256 *pre0 = ctx + (uint32_t)5U;
+  uint8_t *rem = text + n * 16U;
+  Lib_IntVector_Intrinsics_vec256 *pre0 = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc0 = ctx;
-  uint32_t sz_block = (uint32_t)64U;
-  uint32_t len0 = n * (uint32_t)16U / sz_block * sz_block;
+  uint32_t sz_block = 64U;
+  uint32_t len0 = n * 16U / sz_block * sz_block;
   uint8_t *t00 = blocks;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)64U;
+    uint32_t bs = 64U;
     uint8_t *text0 = t00;
-    Hacl_Impl_Poly1305_Field32xN_256_load_acc4(acc0, text0);
+    Hacl_MAC_Poly1305_Simd256_load_acc4(acc0, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t00 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
       Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(block);
+      Lib_IntVector_Intrinsics_vec256 hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + 32U);
       Lib_IntVector_Intrinsics_vec256
-      hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + (uint32_t)32U);
-      Lib_IntVector_Intrinsics_vec256
-      mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
+      mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
       Lib_IntVector_Intrinsics_vec256
       m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
       Lib_IntVector_Intrinsics_vec256
       m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi);
-      Lib_IntVector_Intrinsics_vec256
-      m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U);
-      Lib_IntVector_Intrinsics_vec256
-      m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U);
+      Lib_IntVector_Intrinsics_vec256 m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, 48U);
+      Lib_IntVector_Intrinsics_vec256 m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, 48U);
       Lib_IntVector_Intrinsics_vec256
       m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t010 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t30 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
-      Lib_IntVector_Intrinsics_vec256
-      t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)4U);
+      Lib_IntVector_Intrinsics_vec256 t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 4U);
       Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask260);
       Lib_IntVector_Intrinsics_vec256
-      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, (uint32_t)26U);
+      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, 26U);
       Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t10, mask260);
       Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)30U);
+      Lib_IntVector_Intrinsics_vec256 t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 30U);
       Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256 o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, 40U);
       Lib_IntVector_Intrinsics_vec256 o00 = o5;
       Lib_IntVector_Intrinsics_vec256 o11 = o10;
       Lib_IntVector_Intrinsics_vec256 o21 = o20;
@@ -94,12 +88,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
       e[2U] = o21;
       e[3U] = o31;
       e[4U] = o41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
       Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec256 *rn = pre0 + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec256 *rn5 = pre0 + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec256 *rn = pre0 + 10U;
+      Lib_IntVector_Intrinsics_vec256 *rn5 = pre0 + 15U;
       Lib_IntVector_Intrinsics_vec256 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec256 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec256 r2 = rn[2U];
@@ -204,37 +198,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
       Lib_IntVector_Intrinsics_vec256 t2 = a24;
       Lib_IntVector_Intrinsics_vec256 t3 = a34;
       Lib_IntVector_Intrinsics_vec256 t4 = a44;
-      Lib_IntVector_Intrinsics_vec256
-      mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec256
-      z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec256
-      z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
       Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec256
-      z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec256
-      z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec256 o01 = x02;
@@ -268,45 +253,41 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
       acc0[3U] = o3;
       acc0[4U] = o4;
     }
-    Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(acc0, pre0);
+    Hacl_MAC_Poly1305_Simd256_fmul_r4_normalize(acc0, pre0);
   }
-  uint32_t len1 = n * (uint32_t)16U - len0;
+  uint32_t len1 = n * 16U - len0;
   uint8_t *t10 = blocks + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem1 = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem1 = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t10 + i * (uint32_t)16U;
+    uint8_t *block = t10 + i * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -317,12 +298,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
     Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
     Lib_IntVector_Intrinsics_vec256 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec256 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r1[2U];
@@ -437,37 +418,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -481,41 +453,37 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     acc0[3U] = o3;
     acc0[4U] = o4;
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *last = t10 + nb * (uint32_t)16U;
+    uint8_t *last = t10 + nb * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem1 * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -526,12 +494,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem1 * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem1 * 8U % 26U;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
-    Lib_IntVector_Intrinsics_vec256 fi = e[rem1 * (uint32_t)8U / (uint32_t)26U];
-    e[rem1 * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec256 fi = e[rem1 * 8U / 26U];
+    e[rem1 * 8U / 26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
     Lib_IntVector_Intrinsics_vec256 *r1 = pre0;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre0 + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec256 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r1[2U];
@@ -646,37 +614,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -692,40 +651,36 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
   }
   uint8_t tmp[16U] = { 0U };
   memcpy(tmp, rem, r * sizeof (uint8_t));
-  if (r > (uint32_t)0U)
+  if (r > 0U)
   {
-    Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
     Lib_IntVector_Intrinsics_vec256 *acc = ctx;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -736,12 +691,12 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
     Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
     Lib_IntVector_Intrinsics_vec256 *r1 = pre;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r1[0U];
     Lib_IntVector_Intrinsics_vec256 r11 = r1[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r1[2U];
@@ -856,37 +811,28 @@ poly1305_padded_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -915,49 +861,45 @@ poly1305_do_256(
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[25U] KRML_POST_ALIGN(32) = { 0U };
   uint8_t block[16U] = { 0U };
-  Hacl_Poly1305_256_poly1305_init(ctx, k);
-  if (aadlen != (uint32_t)0U)
+  Hacl_MAC_Poly1305_Simd256_poly1305_init(ctx, k);
+  if (aadlen != 0U)
   {
     poly1305_padded_256(ctx, aadlen, aad);
   }
-  if (mlen != (uint32_t)0U)
+  if (mlen != 0U)
   {
     poly1305_padded_256(ctx, mlen, m);
   }
   store64_le(block, (uint64_t)aadlen);
-  store64_le(block + (uint32_t)8U, (uint64_t)mlen);
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  store64_le(block + 8U, (uint64_t)mlen);
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
   uint64_t u0 = load64_le(block);
   uint64_t lo = u0;
-  uint64_t u = load64_le(block + (uint32_t)8U);
+  uint64_t u = load64_le(block + 8U);
   uint64_t hi = u;
   Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
   Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
   Lib_IntVector_Intrinsics_vec256
   f010 =
     Lib_IntVector_Intrinsics_vec256_and(f0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f110 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f20 =
-    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
       Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-          Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec256
   f30 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
   Lib_IntVector_Intrinsics_vec256 f01 = f010;
   Lib_IntVector_Intrinsics_vec256 f111 = f110;
   Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -968,12 +910,12 @@ poly1305_do_256(
   e[2U] = f2;
   e[3U] = f3;
   e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
+  uint64_t b = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
   Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
   Lib_IntVector_Intrinsics_vec256 *r = pre;
-  Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
   Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1088,37 +1030,28 @@ poly1305_do_256(
   Lib_IntVector_Intrinsics_vec256 t2 = a26;
   Lib_IntVector_Intrinsics_vec256 t3 = a36;
   Lib_IntVector_Intrinsics_vec256 t4 = a46;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1131,95 +1064,93 @@ poly1305_do_256(
   acc[2U] = o2;
   acc[3U] = o3;
   acc[4U] = o4;
-  Hacl_Poly1305_256_poly1305_finish(out, k, ctx);
+  Hacl_MAC_Poly1305_Simd256_poly1305_finish(out, k, ctx);
 }
 
 /**
-Encrypt a message `m` with key `k`.
+Encrypt a message `input` with key `key`.
 
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
 
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the message.
-@param m Pointer to `mlen` bytes of memory where the message is read from.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is written to.
-@param mac Pointer to 16 bytes of memory where the mac is written to.
+@param output Pointer to `input_len` bytes of memory where the ciphertext is written to.
+@param tag Pointer to 16 bytes of memory where the mac is written to.
+@param input Pointer to `input_len` bytes of memory where the message is read from.
+@param input_len Length of the message.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
 */
 void
-Hacl_Chacha20Poly1305_256_aead_encrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(
+  uint8_t *output,
+  uint8_t *tag,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce
 )
 {
-  Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, cipher, m, k, n, (uint32_t)1U);
+  Hacl_Chacha20_Vec256_chacha20_encrypt_256(input_len, output, input, key, nonce, 1U);
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
-  uint8_t *key = tmp;
-  poly1305_do_256(key, aadlen, aad, mlen, cipher, mac);
+  Hacl_Chacha20_Vec256_chacha20_encrypt_256(64U, tmp, tmp, key, nonce, 0U);
+  uint8_t *key1 = tmp;
+  poly1305_do_256(key1, data_len, data, input_len, output, tag);
 }
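/*
 * Usage sketch (editorial addition, not part of the generated patch): a minimal
 * caller of the SIMD256 AEAD encrypt API defined above. Buffer sizes follow the
 * doc comment (32-byte key, 12-byte nonce, 16-byte tag); the header name
 * "Hacl_AEAD_Chacha20Poly1305_Simd256.h" is assumed.
 */
#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"

static void example_aead_encrypt(void)
{
  uint8_t key[32U] = { 0U };            /* AEAD key, 32 bytes */
  uint8_t nonce[12U] = { 0U };          /* AEAD nonce, 12 bytes */
  uint8_t aad[4U] = { 1U, 2U, 3U, 4U }; /* associated data */
  uint8_t msg[16U] = { 0U };            /* plaintext */
  uint8_t cipher[16U];                  /* ciphertext, same length as msg */
  uint8_t tag[16U];                     /* authentication tag */
  /* argument order: output, tag, input, input_len, data, data_len, key, nonce */
  Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher, tag, msg, 16U, aad, 4U, key, nonce);
}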
 
 /**
-Decrypt a ciphertext `cipher` with key `k`.
+Decrypt a ciphertext `input` with key `key`.
 
-The arguments `k`, `n`, `aadlen`, and `aad` are same in encryption/decryption.
-Note: Encryption and decryption can be executed in-place, i.e., `m` and `cipher` can point to the same memory.
+The arguments `key`, `nonce`, `data`, and `data_len` are the same in encryption/decryption.
+Note: Encryption and decryption can be executed in-place, i.e., `input` and `output` can point to the same memory.
 
-If decryption succeeds, the resulting plaintext is stored in `m` and the function returns the success code 0.
-If decryption fails, the array `m` remains unchanged and the function returns the error code 1.
+If decryption succeeds, the resulting plaintext is stored in `output` and the function returns the success code 0.
+If decryption fails, the array `output` remains unchanged and the function returns the error code 1.
 
-@param k Pointer to 32 bytes of memory where the AEAD key is read from.
-@param n Pointer to 12 bytes of memory where the AEAD nonce is read from.
-@param aadlen Length of the associated data.
-@param aad Pointer to `aadlen` bytes of memory where the associated data is read from.
-
-@param mlen Length of the ciphertext.
-@param m Pointer to `mlen` bytes of memory where the message is written to.
-@param cipher Pointer to `mlen` bytes of memory where the ciphertext is read from.
-@param mac Pointer to 16 bytes of memory where the mac is read from.
+@param output Pointer to `input_len` bytes of memory where the message is written to.
+@param input Pointer to `input_len` bytes of memory where the ciphertext is read from.
+@param input_len Length of the ciphertext.
+@param data Pointer to `data_len` bytes of memory where the associated data is read from.
+@param data_len Length of the associated data.
+@param key Pointer to 32 bytes of memory where the AEAD key is read from.
+@param nonce Pointer to 12 bytes of memory where the AEAD nonce is read from.
+@param tag Pointer to 16 bytes of memory where the mac is read from.
 
 @returns 0 on success; 1 on failure.
 */
 uint32_t
-Hacl_Chacha20Poly1305_256_aead_decrypt(
-  uint8_t *k,
-  uint8_t *n,
-  uint32_t aadlen,
-  uint8_t *aad,
-  uint32_t mlen,
-  uint8_t *m,
-  uint8_t *cipher,
-  uint8_t *mac
+Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *data,
+  uint32_t data_len,
+  uint8_t *key,
+  uint8_t *nonce,
+  uint8_t *tag
 )
 {
-  uint8_t computed_mac[16U] = { 0U };
+  uint8_t computed_tag[16U] = { 0U };
   uint8_t tmp[64U] = { 0U };
-  Hacl_Chacha20_Vec256_chacha20_encrypt_256((uint32_t)64U, tmp, tmp, k, n, (uint32_t)0U);
-  uint8_t *key = tmp;
-  poly1305_do_256(key, aadlen, aad, mlen, cipher, computed_mac);
-  uint8_t res = (uint8_t)255U;
+  Hacl_Chacha20_Vec256_chacha20_encrypt_256(64U, tmp, tmp, key, nonce, 0U);
+  uint8_t *key1 = tmp;
+  poly1305_do_256(key1, data_len, data, input_len, input, computed_tag);
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint8_t uu____0 = FStar_UInt8_eq_mask(computed_mac[i], mac[i]);
-    res = uu____0 & res;);
+    0U,
+    16U,
+    1U,
+    uint8_t uu____0 = FStar_UInt8_eq_mask(computed_tag[i], tag[i]);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
-    Hacl_Chacha20_Vec256_chacha20_encrypt_256(mlen, m, cipher, k, n, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Chacha20_Vec256_chacha20_encrypt_256(input_len, output, input, key, nonce, 1U);
+    return 0U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
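/*
 * Usage sketch (editorial addition, not part of the generated patch): verified
 * decryption with the SIMD256 AEAD decrypt API defined above, assuming the same
 * header as in the encrypt sketch. Per the doc comment, the function returns 0U
 * when the tag verifies and 1U otherwise, leaving `output` untouched on failure.
 */
static int
example_aead_decrypt(
  uint8_t *plain,
  uint8_t *cipher,
  uint32_t len,
  uint8_t *aad,
  uint32_t aad_len,
  uint8_t *key,
  uint8_t *nonce,
  uint8_t *tag
)
{
  /* argument order: output, input, input_len, data, data_len, key, nonce, tag */
  uint32_t rc = Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(plain, cipher, len, aad, aad_len, key, nonce, tag);
  if (rc == 0U)
  {
    return 0;  /* tag verified; plaintext written to `plain` */
  }
  return -1;   /* authentication failed; `plain` unchanged */
}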
 
diff --git a/src/msvc/Hacl_Bignum.c b/src/msvc/Hacl_Bignum.c
index ca093c6d..ceb82f92 100644
--- a/src/msvc/Hacl_Bignum.c
+++ b/src/msvc/Hacl_Bignum.c
@@ -37,12 +37,12 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_mul_u32(aLen, a, aLen, b, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint32_t *a0 = a;
   uint32_t *a1 = a + len2;
   uint32_t *b0 = b;
@@ -52,23 +52,23 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t *tmp_ = tmp + aLen;
   uint32_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a0, a1, tmp_);
   uint32_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint32_t *os = t0;
-    uint32_t x = (((uint32_t)0U - c0) & t0[i]) | (~((uint32_t)0U - c0) & tmp_[i]);
+    uint32_t x = ((0U - c0) & t0[i]) | (~(0U - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c10);
+  KRML_MAYBE_UNUSED_VAR(c10);
   uint32_t c00 = c0;
   uint32_t c010 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, b0, b1, tmp_);
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, b1, b0, t1);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint32_t *os = t1;
-    uint32_t x = (((uint32_t)0U - c010) & t1[i]) | (~((uint32_t)0U - c010) & tmp_[i]);
+    uint32_t x = ((0U - c010) & t1[i]) | (~(0U - c010) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint32_t c11 = c010;
   uint32_t *t23 = tmp + aLen;
   uint32_t *tmp1 = tmp + aLen + aLen;
@@ -81,66 +81,61 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t *r231 = res + aLen;
   uint32_t *t01 = tmp;
   uint32_t *t231 = tmp + aLen;
-  uint32_t *t45 = tmp + (uint32_t)2U * aLen;
-  uint32_t *t67 = tmp + (uint32_t)3U * aLen;
+  uint32_t *t45 = tmp + 2U * aLen;
+  uint32_t *t67 = tmp + 3U * aLen;
   uint32_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r011, r231, t01);
   uint32_t c_sign = c00 ^ c11;
   uint32_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(aLen, t01, t231, t67);
   uint32_t c31 = c2 - c3;
   uint32_t c4 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, t01, t231, t45);
   uint32_t c41 = c2 + c4;
-  uint32_t mask = (uint32_t)0U - c_sign;
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  uint32_t mask = 0U - c_sign;
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     uint32_t *os = t45;
     uint32_t x = (mask & t45[i]) | (~mask & t67[i]);
     os[i] = x;
   }
   uint32_t c5 = (mask & c41) | (~mask & c31);
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint32_t *r0 = res + aLen2;
   uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0);
   uint32_t c6 = r10;
   uint32_t c60 = c6;
   uint32_t c7 = c5 + c60;
   uint32_t *r = res + aLen + aLen2;
-  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32((uint32_t)0U, r[0U], c7, r);
+  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32(0U, r[0U], c7, r);
   uint32_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint32_t *a11 = r + (uint32_t)1U;
-    uint32_t *res1 = r + (uint32_t)1U;
+    uint32_t *a11 = r + 1U;
+    uint32_t *res1 = r + 1U;
     uint32_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint32_t t11 = a11[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i0);
-      uint32_t t110 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t110, (uint32_t)0U, res_i1);
-      uint32_t t111 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t111, (uint32_t)0U, res_i2);
-      uint32_t t112 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t112, (uint32_t)0U, res_i);
+      uint32_t t11 = a11[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i0);
+      uint32_t t110 = a11[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t110, 0U, res_i1);
+      uint32_t t111 = a11[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t111, 0U, res_i2);
+      uint32_t t112 = a11[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t112, 0U, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint32_t t11 = a11[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i);
     }
     uint32_t c110 = c;
     r1 = c110;
@@ -152,7 +147,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(
   uint32_t c8 = r1;
   uint32_t c = c8;
   uint32_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -164,12 +159,12 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_mul_u64(aLen, a, aLen, b, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint64_t *a0 = a;
   uint64_t *a1 = a + len2;
   uint64_t *b0 = b;
@@ -179,23 +174,23 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t *tmp_ = tmp + aLen;
   uint64_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a0, a1, tmp_);
   uint64_t c10 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint64_t *os = t0;
-    uint64_t x = (((uint64_t)0U - c0) & t0[i]) | (~((uint64_t)0U - c0) & tmp_[i]);
+    uint64_t x = ((0ULL - c0) & t0[i]) | (~(0ULL - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c10);
+  KRML_MAYBE_UNUSED_VAR(c10);
   uint64_t c00 = c0;
   uint64_t c010 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, b0, b1, tmp_);
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, b1, b0, t1);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint64_t *os = t1;
-    uint64_t x = (((uint64_t)0U - c010) & t1[i]) | (~((uint64_t)0U - c010) & tmp_[i]);
+    uint64_t x = ((0ULL - c010) & t1[i]) | (~(0ULL - c010) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint64_t c11 = c010;
   uint64_t *t23 = tmp + aLen;
   uint64_t *tmp1 = tmp + aLen + aLen;
@@ -208,66 +203,61 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t *r231 = res + aLen;
   uint64_t *t01 = tmp;
   uint64_t *t231 = tmp + aLen;
-  uint64_t *t45 = tmp + (uint32_t)2U * aLen;
-  uint64_t *t67 = tmp + (uint32_t)3U * aLen;
+  uint64_t *t45 = tmp + 2U * aLen;
+  uint64_t *t67 = tmp + 3U * aLen;
   uint64_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r011, r231, t01);
   uint64_t c_sign = c00 ^ c11;
   uint64_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(aLen, t01, t231, t67);
   uint64_t c31 = c2 - c3;
   uint64_t c4 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, t01, t231, t45);
   uint64_t c41 = c2 + c4;
-  uint64_t mask = (uint64_t)0U - c_sign;
-  for (uint32_t i = (uint32_t)0U; i < aLen; i++)
+  uint64_t mask = 0ULL - c_sign;
+  for (uint32_t i = 0U; i < aLen; i++)
   {
     uint64_t *os = t45;
     uint64_t x = (mask & t45[i]) | (~mask & t67[i]);
     os[i] = x;
   }
   uint64_t c5 = (mask & c41) | (~mask & c31);
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint64_t *r0 = res + aLen2;
   uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0);
   uint64_t c6 = r10;
   uint64_t c60 = c6;
   uint64_t c7 = c5 + c60;
   uint64_t *r = res + aLen + aLen2;
-  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, r[0U], c7, r);
+  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, r[0U], c7, r);
   uint64_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint64_t *a11 = r + (uint32_t)1U;
-    uint64_t *res1 = r + (uint32_t)1U;
+    uint64_t *a11 = r + 1U;
+    uint64_t *res1 = r + 1U;
     uint64_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint64_t t11 = a11[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i0);
-      uint64_t t110 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t110, (uint64_t)0U, res_i1);
-      uint64_t t111 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t111, (uint64_t)0U, res_i2);
-      uint64_t t112 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t112, (uint64_t)0U, res_i);
+      uint64_t t11 = a11[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i0);
+      uint64_t t110 = a11[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t110, 0ULL, res_i1);
+      uint64_t t111 = a11[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t111, 0ULL, res_i2);
+      uint64_t t112 = a11[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t112, 0ULL, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint64_t t11 = a11[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i);
     }
     uint64_t c110 = c;
     r1 = c110;
@@ -279,7 +269,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(
   uint64_t c8 = r1;
   uint64_t c = c8;
   uint64_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -290,27 +280,27 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(
   uint32_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_sqr_u32(aLen, a, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint32_t *a0 = a;
   uint32_t *a1 = a + len2;
   uint32_t *t0 = tmp;
   uint32_t *tmp_ = tmp + aLen;
   uint32_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a0, a1, tmp_);
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint32_t *os = t0;
-    uint32_t x = (((uint32_t)0U - c0) & t0[i]) | (~((uint32_t)0U - c0) & tmp_[i]);
+    uint32_t x = ((0U - c0) & t0[i]) | (~(0U - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint32_t c00 = c0;
-  KRML_HOST_IGNORE(c00);
+  KRML_MAYBE_UNUSED_VAR(c00);
   uint32_t *t23 = tmp + aLen;
   uint32_t *tmp1 = tmp + aLen + aLen;
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len2, t0, tmp1, t23);
@@ -322,54 +312,49 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(
   uint32_t *r231 = res + aLen;
   uint32_t *t01 = tmp;
   uint32_t *t231 = tmp + aLen;
-  uint32_t *t45 = tmp + (uint32_t)2U * aLen;
+  uint32_t *t45 = tmp + 2U * aLen;
   uint32_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r011, r231, t01);
   uint32_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(aLen, t01, t231, t45);
   uint32_t c5 = c2 - c3;
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint32_t *r0 = res + aLen2;
   uint32_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u32(aLen, r0, t45, r0);
   uint32_t c4 = r10;
   uint32_t c6 = c4;
   uint32_t c7 = c5 + c6;
   uint32_t *r = res + aLen + aLen2;
-  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32((uint32_t)0U, r[0U], c7, r);
+  uint32_t c01 = Lib_IntTypes_Intrinsics_add_carry_u32(0U, r[0U], c7, r);
   uint32_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint32_t *a11 = r + (uint32_t)1U;
-    uint32_t *res1 = r + (uint32_t)1U;
+    uint32_t *a11 = r + 1U;
+    uint32_t *res1 = r + 1U;
     uint32_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint32_t t1 = a11[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a11[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a11[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a11[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a11[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, 0U, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint32_t t1 = a11[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, 0U, res_i);
     }
     uint32_t c10 = c;
     r1 = c10;
@@ -381,7 +366,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(
   uint32_t c8 = r1;
   uint32_t c = c8;
   uint32_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -392,27 +377,27 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(
   uint64_t *res
 )
 {
-  if (aLen < (uint32_t)32U || aLen % (uint32_t)2U == (uint32_t)1U)
+  if (aLen < 32U || aLen % 2U == 1U)
   {
     Hacl_Bignum_Multiplication_bn_sqr_u64(aLen, a, res);
     return;
   }
-  uint32_t len2 = aLen / (uint32_t)2U;
+  uint32_t len2 = aLen / 2U;
   uint64_t *a0 = a;
   uint64_t *a1 = a + len2;
   uint64_t *t0 = tmp;
   uint64_t *tmp_ = tmp + aLen;
   uint64_t c0 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a0, a1, tmp_);
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len2, a1, a0, t0);
-  for (uint32_t i = (uint32_t)0U; i < len2; i++)
+  for (uint32_t i = 0U; i < len2; i++)
   {
     uint64_t *os = t0;
-    uint64_t x = (((uint64_t)0U - c0) & t0[i]) | (~((uint64_t)0U - c0) & tmp_[i]);
+    uint64_t x = ((0ULL - c0) & t0[i]) | (~(0ULL - c0) & tmp_[i]);
     os[i] = x;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   uint64_t c00 = c0;
-  KRML_HOST_IGNORE(c00);
+  KRML_MAYBE_UNUSED_VAR(c00);
   uint64_t *t23 = tmp + aLen;
   uint64_t *tmp1 = tmp + aLen + aLen;
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len2, t0, tmp1, t23);
@@ -424,54 +409,49 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(
   uint64_t *r231 = res + aLen;
   uint64_t *t01 = tmp;
   uint64_t *t231 = tmp + aLen;
-  uint64_t *t45 = tmp + (uint32_t)2U * aLen;
+  uint64_t *t45 = tmp + 2U * aLen;
   uint64_t c2 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r011, r231, t01);
   uint64_t c3 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(aLen, t01, t231, t45);
   uint64_t c5 = c2 - c3;
-  uint32_t aLen2 = aLen / (uint32_t)2U;
+  uint32_t aLen2 = aLen / 2U;
   uint64_t *r0 = res + aLen2;
   uint64_t r10 = Hacl_Bignum_Addition_bn_add_eq_len_u64(aLen, r0, t45, r0);
   uint64_t c4 = r10;
   uint64_t c6 = c4;
   uint64_t c7 = c5 + c6;
   uint64_t *r = res + aLen + aLen2;
-  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, r[0U], c7, r);
+  uint64_t c01 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, r[0U], c7, r);
   uint64_t r1;
-  if ((uint32_t)1U < aLen + aLen - (aLen + aLen2))
+  if (1U < aLen + aLen - (aLen + aLen2))
   {
-    uint64_t *a11 = r + (uint32_t)1U;
-    uint64_t *res1 = r + (uint32_t)1U;
+    uint64_t *a11 = r + 1U;
+    uint64_t *res1 = r + 1U;
     uint64_t c = c01;
-    for
-    (uint32_t
-      i = (uint32_t)0U;
-      i
-      < (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U;
-      i++)
+    for (uint32_t i = 0U; i < (aLen + aLen - (aLen + aLen2) - 1U) / 4U; i++)
     {
-      uint64_t t1 = a11[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a11[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a11[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a11[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a11[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a11[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a11[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a11[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, 0ULL, res_i);
     }
     for
     (uint32_t
-      i = (aLen + aLen - (aLen + aLen2) - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
+      i = (aLen + aLen - (aLen + aLen2) - 1U) / 4U * 4U;
       i
-      < aLen + aLen - (aLen + aLen2) - (uint32_t)1U;
+      < aLen + aLen - (aLen + aLen2) - 1U;
       i++)
     {
       uint64_t t1 = a11[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c10 = c;
     r1 = c10;
@@ -483,7 +463,7 @@ Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(
   uint64_t c8 = r1;
   uint64_t c = c8;
   uint64_t c9 = c;
-  KRML_HOST_IGNORE(c9);
+  KRML_MAYBE_UNUSED_VAR(c9);
 }
 
 void
@@ -495,27 +475,27 @@ Hacl_Bignum_bn_add_mod_n_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -526,27 +506,27 @@ Hacl_Bignum_bn_add_mod_n_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t *tmp = (uint32_t *)alloca(len1 * sizeof (uint32_t));
   memset(tmp, 0U, len1 * sizeof (uint32_t));
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = res[i];
     uint32_t t2 = n[i];
@@ -555,7 +535,7 @@ Hacl_Bignum_bn_add_mod_n_u32(
   }
   uint32_t c1 = c;
   uint32_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -572,27 +552,27 @@ Hacl_Bignum_bn_add_mod_n_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -603,27 +583,27 @@ Hacl_Bignum_bn_add_mod_n_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t *tmp = (uint64_t *)alloca(len1 * sizeof (uint64_t));
   memset(tmp, 0U, len1 * sizeof (uint64_t));
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = res[i];
     uint64_t t2 = n[i];
@@ -632,7 +612,7 @@ Hacl_Bignum_bn_add_mod_n_u64(
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -649,27 +629,27 @@ Hacl_Bignum_bn_sub_mod_n_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = a[i];
     uint32_t t2 = b[i];
@@ -680,27 +660,27 @@ Hacl_Bignum_bn_sub_mod_n_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t *tmp = (uint32_t *)alloca(len1 * sizeof (uint32_t));
   memset(tmp, 0U, len1 * sizeof (uint32_t));
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint32_t t1 = res[i];
     uint32_t t2 = n[i];
@@ -708,9 +688,9 @@ Hacl_Bignum_bn_sub_mod_n_u32(
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t2, res_i);
   }
   uint32_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint32_t c2 = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t c2 = 0U - c00;
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -727,27 +707,27 @@ Hacl_Bignum_bn_sub_mod_n_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = a[i];
     uint64_t t2 = b[i];
@@ -758,27 +738,27 @@ Hacl_Bignum_bn_sub_mod_n_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t *tmp = (uint64_t *)alloca(len1 * sizeof (uint64_t));
   memset(tmp, 0U, len1 * sizeof (uint64_t));
-  uint64_t c = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len1 / (uint32_t)4U; i++)
+  uint64_t c = 0ULL;
+  for (uint32_t i = 0U; i < len1 / 4U; i++)
   {
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
-  for (uint32_t i = len1 / (uint32_t)4U * (uint32_t)4U; i < len1; i++)
+  for (uint32_t i = len1 / 4U * 4U; i < len1; i++)
   {
     uint64_t t1 = res[i];
     uint64_t t2 = n[i];
@@ -786,9 +766,9 @@ Hacl_Bignum_bn_sub_mod_n_u64(
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len1; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
+  for (uint32_t i = 0U; i < len1; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -798,42 +778,42 @@ Hacl_Bignum_bn_sub_mod_n_u64(
 
 uint32_t Hacl_Bignum_ModInvLimb_mod_inv_uint32(uint32_t n0)
 {
-  uint32_t alpha = (uint32_t)2147483648U;
+  uint32_t alpha = 2147483648U;
   uint32_t beta = n0;
-  uint32_t ub = (uint32_t)0U;
-  uint32_t vb = (uint32_t)0U;
-  ub = (uint32_t)1U;
-  vb = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t ub = 0U;
+  uint32_t vb = 0U;
+  ub = 1U;
+  vb = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint32_t us = ub;
     uint32_t vs = vb;
-    uint32_t u_is_odd = (uint32_t)0U - (us & (uint32_t)1U);
+    uint32_t u_is_odd = 0U - (us & 1U);
     uint32_t beta_if_u_is_odd = beta & u_is_odd;
-    ub = ((us ^ beta_if_u_is_odd) >> (uint32_t)1U) + (us & beta_if_u_is_odd);
+    ub = ((us ^ beta_if_u_is_odd) >> 1U) + (us & beta_if_u_is_odd);
     uint32_t alpha_if_u_is_odd = alpha & u_is_odd;
-    vb = (vs >> (uint32_t)1U) + alpha_if_u_is_odd;
+    vb = (vs >> 1U) + alpha_if_u_is_odd;
   }
   return vb;
 }
 
 uint64_t Hacl_Bignum_ModInvLimb_mod_inv_uint64(uint64_t n0)
 {
-  uint64_t alpha = (uint64_t)9223372036854775808U;
+  uint64_t alpha = 9223372036854775808ULL;
   uint64_t beta = n0;
-  uint64_t ub = (uint64_t)0U;
-  uint64_t vb = (uint64_t)0U;
-  ub = (uint64_t)1U;
-  vb = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t ub = 0ULL;
+  uint64_t vb = 0ULL;
+  ub = 1ULL;
+  vb = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t us = ub;
     uint64_t vs = vb;
-    uint64_t u_is_odd = (uint64_t)0U - (us & (uint64_t)1U);
+    uint64_t u_is_odd = 0ULL - (us & 1ULL);
     uint64_t beta_if_u_is_odd = beta & u_is_odd;
-    ub = ((us ^ beta_if_u_is_odd) >> (uint32_t)1U) + (us & beta_if_u_is_odd);
+    ub = ((us ^ beta_if_u_is_odd) >> 1U) + (us & beta_if_u_is_odd);
     uint64_t alpha_if_u_is_odd = alpha & u_is_odd;
-    vb = (vs >> (uint32_t)1U) + alpha_if_u_is_odd;
+    vb = (vs >> 1U) + alpha_if_u_is_odd;
   }
   return vb;
 }
@@ -844,15 +824,15 @@ uint32_t Hacl_Bignum_Montgomery_bn_check_modulus_u32(uint32_t len, uint32_t *n)
   uint32_t *one = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc;
   return m0 & m1;
@@ -867,10 +847,10 @@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(
 )
 {
   memset(res, 0U, len * sizeof (uint32_t));
-  uint32_t i = nBits / (uint32_t)32U;
-  uint32_t j = nBits % (uint32_t)32U;
-  res[i] = res[i] | (uint32_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U * len - nBits; i0++)
+  uint32_t i = nBits / 32U;
+  uint32_t j = nBits % 32U;
+  res[i] = res[i] | 1U << j;
+  for (uint32_t i0 = 0U; i0 < 64U * len - nBits; i0++)
   {
     Hacl_Bignum_bn_add_mod_n_u32(len, n, res, res, res);
   }
@@ -885,28 +865,28 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint32_t a_i = n[i];
       uint32_t *res_i = res_j0 + i;
@@ -923,27 +903,27 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *tmp = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(tmp, 0U, len * sizeof (uint32_t));
-  uint32_t c1 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+  uint32_t c1 = 0U;
+  for (uint32_t i = 0U; i < len / 4U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t12, t2, res_i);
   }
-  for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+  for (uint32_t i = len / 4U * 4U; i < len; i++)
   {
     uint32_t t1 = res[i];
     uint32_t t2 = n[i];
@@ -952,7 +932,7 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u32(
   }
   uint32_t c10 = c1;
   uint32_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
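
The reduction above ends with a trial subtraction res - n written into tmp and a branch-free select: the mask c2 (derived from the final carry c00 and the borrow c10) is either all-ones or all-zeros, so every output limb is taken from res or from tmp without a secret-dependent branch. The select idiom in isolation, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: the branch-free select used after the trial
   subtraction.  mask must be all-ones (keep a) or all-zeros (take b). */
static void ct_select_u32(uint32_t mask, const uint32_t *a, const uint32_t *b,
                          uint32_t *res, uint32_t len)
{
  for (uint32_t i = 0U; i < len; i++)
    res[i] = (mask & a[i]) | (~mask & b[i]);
}

int main(void)
{
  uint32_t a[2U] = { 1U, 2U }, b[2U] = { 3U, 4U }, r[2U];
  ct_select_u32(0xFFFFFFFFU, a, b, r, 2U);
  printf("%u %u\n", r[0U], r[1U]);  /* 1 2 */
  ct_select_u32(0U, a, b, r, 2U);
  printf("%u %u\n", r[0U], r[1U]);  /* 3 4 */
  return 0;
}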
@@ -973,9 +953,9 @@ Hacl_Bignum_Montgomery_bn_to_mont_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv, c, aM);
 }
@@ -1009,9 +989,9 @@ Hacl_Bignum_Montgomery_bn_mont_mul_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, aM, bM, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
@@ -1028,9 +1008,9 @@ Hacl_Bignum_Montgomery_bn_mont_sqr_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, aM, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
@@ -1041,15 +1021,15 @@ uint64_t Hacl_Bignum_Montgomery_bn_check_modulus_u64(uint32_t len, uint64_t *n)
   uint64_t *one = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   return m0 & m1;
@@ -1064,10 +1044,10 @@ Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(
 )
 {
   memset(res, 0U, len * sizeof (uint64_t));
-  uint32_t i = nBits / (uint32_t)64U;
-  uint32_t j = nBits % (uint32_t)64U;
-  res[i] = res[i] | (uint64_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U * len - nBits; i0++)
+  uint32_t i = nBits / 64U;
+  uint32_t j = nBits % 64U;
+  res[i] = res[i] | 1ULL << j;
+  for (uint32_t i0 = 0U; i0 < 128U * len - nBits; i0++)
   {
     Hacl_Bignum_bn_add_mod_n_u64(len, n, res, res, res);
   }
@@ -1082,28 +1062,28 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c1 = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = n[i];
       uint64_t *res_i = res_j0 + i;
@@ -1120,27 +1100,27 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *tmp = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(tmp, 0U, len * sizeof (uint64_t));
-  uint64_t c1 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+  uint64_t c1 = 0ULL;
+  for (uint32_t i = 0U; i < len / 4U; i++)
   {
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t12, t2, res_i);
   }
-  for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+  for (uint32_t i = len / 4U * 4U; i < len; i++)
   {
     uint64_t t1 = res[i];
     uint64_t t2 = n[i];
@@ -1149,7 +1129,7 @@ Hacl_Bignum_Montgomery_bn_mont_reduction_u64(
   }
   uint64_t c10 = c1;
   uint64_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -1170,9 +1150,9 @@ Hacl_Bignum_Montgomery_bn_to_mont_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv, c, aM);
 }
@@ -1206,9 +1186,9 @@ Hacl_Bignum_Montgomery_bn_mont_mul_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, aM, bM, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
@@ -1225,9 +1205,9 @@ Hacl_Bignum_Montgomery_bn_mont_sqr_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, aM, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
@@ -1241,28 +1221,28 @@ bn_almost_mont_reduction_u32(
   uint32_t *res
 )
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint32_t a_i = n[i];
       uint32_t *res_i = res_j0 + i;
@@ -1280,9 +1260,9 @@ bn_almost_mont_reduction_u32(
   uint32_t *tmp = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(tmp, 0U, len * sizeof (uint32_t));
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len, res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t *os = res;
     uint32_t x = (m & tmp[i]) | (~m & res[i]);
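
bn_almost_mont_reduction_u32 differs from the strict reduction in its tail: the borrow of the trial subtraction is discarded (the patch merely renames the silencing macro from KRML_HOST_IGNORE to KRML_MAYBE_UNUSED_VAR), and n is subtracted exactly when the top carry c00 is set. The result is bounded by 2^(32*len) but not necessarily by n, which is enough for the intermediate values of the exponentiation routines below. A one-limb sketch of that tail, for illustration only (almost_reduce_tail is a hypothetical name):

#include <stdint.h>
#include <stdio.h>

/* Illustration only: the tail of an "almost" reduction on one limb.  The
   output stays below 2^32 but may still be >= n. */
static uint32_t almost_reduce_tail(uint32_t res, uint32_t c00, uint32_t n)
{
  uint32_t tmp = res - n;         /* trial subtraction, borrow discarded */
  uint32_t m = 0U - c00;          /* all-ones iff the reduction carried  */
  return (m & tmp) | (~m & res);  /* subtract n only on carry            */
}

int main(void)
{
  printf("%u %u\n",
         almost_reduce_tail(7U, 0U, 5U),   /* 7: no carry, keep res */
         almost_reduce_tail(7U, 1U, 5U));  /* 2: carry, res - n     */
  return 0;
}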
@@ -1303,9 +1283,9 @@ bn_almost_mont_mul_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, aM, bM, tmp, c);
   bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
@@ -1322,9 +1302,9 @@ bn_almost_mont_sqr_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, aM, tmp, c);
   bn_almost_mont_reduction_u32(len, n, nInv_u64, c, resM);
 }
@@ -1338,28 +1318,28 @@ bn_almost_mont_reduction_u64(
   uint64_t *res
 )
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c1 = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = n[i];
       uint64_t *res_i = res_j0 + i;
@@ -1377,9 +1357,9 @@ bn_almost_mont_reduction_u64(
   uint64_t *tmp = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(tmp, 0U, len * sizeof (uint64_t));
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len, res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t *os = res;
     uint64_t x = (m & tmp[i]) | (~m & res[i]);
@@ -1400,9 +1380,9 @@ bn_almost_mont_mul_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, aM, bM, tmp, c);
   bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
@@ -1419,9 +1399,9 @@ bn_almost_mont_sqr_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, aM, tmp, c);
   bn_almost_mont_reduction_u64(len, n, nInv_u64, c, resM);
 }
@@ -1439,56 +1419,56 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(
   uint32_t *one = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m10 = acc0;
   uint32_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t m1;
-  if (bBits < (uint32_t)32U * bLen)
+  if (bBits < 32U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), bLen);
     uint32_t *b2 = (uint32_t *)alloca(bLen * sizeof (uint32_t));
     memset(b2, 0U, bLen * sizeof (uint32_t));
-    uint32_t i0 = bBits / (uint32_t)32U;
-    uint32_t j = bBits % (uint32_t)32U;
-    b2[i0] = b2[i0] | (uint32_t)1U << j;
-    uint32_t acc = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 32U;
+    uint32_t j = bBits % 32U;
+    b2[i0] = b2[i0] | 1U << j;
+    uint32_t acc = 0U;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]);
       uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
     }
     uint32_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint32_t)0xFFFFFFFFU;
+    m1 = 0xFFFFFFFFU;
   }
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t m = m1 & m2;
@@ -1507,7 +1487,7 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t *aM = (uint32_t *)alloca(len * sizeof (uint32_t));
@@ -1515,9 +1495,9 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
     KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
     uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
     memset(c, 0U, (len + len) * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-    uint32_t *tmp0 = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+    KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+    uint32_t *tmp0 = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+    memset(tmp0, 0U, 4U * len * sizeof (uint32_t));
     Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c);
     Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM);
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
@@ -1531,13 +1511,13 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         bn_almost_mont_mul_u32(len, ctx_n0, mu, resM, aM, resM);
@@ -1558,31 +1538,31 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp0 = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp0 = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp0, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM);
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *resM = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(resM, 0U, len * sizeof (uint32_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *ctx = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(ctx, 0U, (len + len) * sizeof (uint32_t));
   memcpy(ctx, n, len * sizeof (uint32_t));
   memcpy(ctx + len, r2, len * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len);
-  uint32_t *table = (uint32_t *)alloca((uint32_t)16U * len * sizeof (uint32_t));
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len);
+  uint32_t *table = (uint32_t *)alloca(16U * len * sizeof (uint32_t));
+  memset(table, 0U, 16U * len * sizeof (uint32_t));
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *tmp = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(tmp, 0U, len * sizeof (uint32_t));
@@ -1593,21 +1573,21 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
   Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * len;
     uint32_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * len;
     uint32_t *ctx_n = ctx;
     bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
     uint32_t bits_l32 = bits_c;
     const uint32_t *a_bits_l = table + bits_l32 * len;
     memcpy(resM, (uint32_t *)a_bits_l, len * sizeof (uint32_t));
@@ -1621,16 +1601,16 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *tmp1 = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(tmp1, 0U, len * sizeof (uint32_t));
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
     uint32_t bits_l32 = bits_l;
     const uint32_t *a_bits_l = table + bits_l32 * len;
     memcpy(tmp1, (uint32_t *)a_bits_l, len * sizeof (uint32_t));
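
The vartime path above is left-to-right fixed-window exponentiation with 4-bit windows: a 16-entry table holding aM^0 .. aM^15 in Montgomery form is built with seven square/multiply pairs, the (possibly partial) top window seeds the accumulator, and each remaining window costs four squarings plus one table multiplication. A compact sketch of the same schedule over ordinary 32-bit words, for illustration only (it reduces with the % operator instead of Montgomery arithmetic, and mulmod / mod_exp_fixed_window are hypothetical names):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, for illustration only. */
static uint32_t mulmod(uint32_t a, uint32_t b, uint32_t n)
{
  return (uint32_t)(((uint64_t)a * b) % n);
}

/* a^b mod n with 4-bit fixed windows; bBits is any bound with b < 2^bBits. */
static uint32_t mod_exp_fixed_window(uint32_t a, uint32_t b, uint32_t bBits, uint32_t n)
{
  uint32_t table[16U];
  table[0U] = 1U % n;
  for (uint32_t i = 1U; i < 16U; i++)
    table[i] = mulmod(table[i - 1U], a, n);          /* table[i] = a^i mod n */
  uint32_t acc = 1U % n;
  if (bBits % 4U != 0U)                              /* partial top window   */
    acc = table[(b >> (bBits / 4U * 4U)) & 0xFU];
  for (uint32_t i = 0U; i < bBits / 4U; i++)         /* full windows, MSB first */
  {
    for (uint32_t s = 0U; s < 4U; s++)
      acc = mulmod(acc, acc, n);                     /* four squarings       */
    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
    acc = mulmod(acc, table[(b >> k) & 0xFU], n);    /* one table multiply   */
  }
  return acc;
}

int main(void)
{
  /* 561 is a Carmichael number, so 7^560 mod 561 = 1; 560 < 2^10. */
  printf("%u\n", mod_exp_fixed_window(7U, 560U, 10U, 561U));
  return 0;
}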
@@ -1656,7 +1636,7 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t *aM = (uint32_t *)alloca(len * sizeof (uint32_t));
@@ -1664,9 +1644,9 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
     KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
     uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
     memset(c, 0U, (len + len) * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-    uint32_t *tmp0 = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+    KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+    uint32_t *tmp0 = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+    memset(tmp0, 0U, 4U * len * sizeof (uint32_t));
     Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c);
     Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, aM);
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
@@ -1677,20 +1657,20 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
     memset(ctx, 0U, (len + len) * sizeof (uint32_t));
     memcpy(ctx, n, len * sizeof (uint32_t));
     memcpy(ctx + len, r2, len * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      for (uint32_t i = 0U; i < len; i++)
       {
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -1701,9 +1681,9 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
       sw = bit;
     }
     uint32_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len; i++)
+    for (uint32_t i = 0U; i < len; i++)
     {
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
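
In the constant-time small-exponent path above, the square-and-multiply ladder never branches on a key bit: sw1 is the XOR of the current and previous exponent bit, and the masked XOR loop swaps resM and aM only when that flag is set, so the sequence of operations and memory accesses is identical for every exponent. The swap idiom in isolation, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: branch-free conditional swap.  swap must be 0 or 1;
   when it is 1 the two limb arrays are exchanged with no data-dependent
   branch. */
static void ct_cswap_u32(uint32_t swap, uint32_t *x, uint32_t *y, uint32_t len)
{
  uint32_t mask = 0U - swap;
  for (uint32_t i = 0U; i < len; i++)
  {
    uint32_t dummy = mask & (x[i] ^ y[i]);
    x[i] = x[i] ^ dummy;
    y[i] = y[i] ^ dummy;
  }
}

int main(void)
{
  uint32_t x[2U] = { 1U, 2U }, y[2U] = { 3U, 4U };
  ct_cswap_u32(1U, x, y, 2U);
  printf("%u %u %u %u\n", x[0U], x[1U], y[0U], y[1U]);  /* 3 4 1 2 */
  return 0;
}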
@@ -1720,31 +1700,31 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *c0 = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(c0, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp0 = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp0 = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp0, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, r2, tmp0, c0);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c0, aM);
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *resM = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(resM, 0U, len * sizeof (uint32_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *ctx = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(ctx, 0U, (len + len) * sizeof (uint32_t));
   memcpy(ctx, n, len * sizeof (uint32_t));
   memcpy(ctx + len, r2, len * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len);
-  uint32_t *table = (uint32_t *)alloca((uint32_t)16U * len * sizeof (uint32_t));
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len);
+  uint32_t *table = (uint32_t *)alloca(16U * len * sizeof (uint32_t));
+  memset(table, 0U, 16U * len * sizeof (uint32_t));
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *tmp = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(tmp, 0U, len * sizeof (uint32_t));
@@ -1755,29 +1735,29 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
   Hacl_Bignum_Montgomery_bn_from_mont_u32(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * len;
     uint32_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u32(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * len;
     uint32_t *ctx_n = ctx;
     bn_almost_mont_mul_u32(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint32_t *)(table + (uint32_t)0U * len), len * sizeof (uint32_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+    memcpy(resM, (uint32_t *)(table + 0U * len), len * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
         uint32_t *os = resM;
         uint32_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -1793,24 +1773,24 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u32(
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *tmp1 = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(tmp1, 0U, len * sizeof (uint32_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       bn_almost_mont_sqr_u32(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp1, (uint32_t *)(table + (uint32_t)0U * len), len * sizeof (uint32_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
+    memcpy(tmp1, (uint32_t *)(table + 0U * len), len * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
         uint32_t *os = tmp1;
         uint32_t x = (c & res_j[i]) | (~c & tmp1[i]);
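
Unlike the vartime path, the constant-time window loop never indexes the precomputed table with a secret value: it copies table[0] and then scans all fifteen remaining entries, folding each one in under an equality mask, so the access pattern is independent of the exponent. A standalone sketch of such a masked lookup, for illustration only (eq_mask_u32 mimics the usual constant-time equality-mask construction):

#include <stdint.h>
#include <stdio.h>

/* Illustration only: all-ones iff a == b, zero otherwise, without branches. */
static uint32_t eq_mask_u32(uint32_t a, uint32_t b)
{
  uint32_t x = a ^ b;
  uint32_t minus_x = ~x + 1U;          /* two's-complement negation         */
  uint32_t x_or_minus_x = x | minus_x; /* top bit set iff x != 0            */
  return (x_or_minus_x >> 31U) - 1U;   /* 0 - 1 wraps to all-ones iff x == 0 */
}

/* Select table[idx] by scanning every entry, so the memory trace does not
   depend on idx. */
static uint32_t ct_table_lookup(const uint32_t table[16U], uint32_t idx)
{
  uint32_t res = table[0U];
  for (uint32_t i = 1U; i < 16U; i++)
  {
    uint32_t c = eq_mask_u32(idx, i);
    res = (c & table[i]) | (~c & res);
  }
  return res;
}

int main(void)
{
  uint32_t table[16U];
  for (uint32_t i = 0U; i < 16U; i++)
    table[i] = 100U + i;
  printf("%u\n", ct_table_lookup(table, 11U));  /* 111 */
  return 0;
}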
@@ -1877,56 +1857,56 @@ Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(
   uint64_t *one = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m10 = acc0;
   uint64_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t m1;
-  if (bBits < (uint32_t)64U * bLen)
+  if (bBits < 64U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
     uint64_t *b2 = (uint64_t *)alloca(bLen * sizeof (uint64_t));
     memset(b2, 0U, bLen * sizeof (uint64_t));
-    uint32_t i0 = bBits / (uint32_t)64U;
-    uint32_t j = bBits % (uint32_t)64U;
-    b2[i0] = b2[i0] | (uint64_t)1U << j;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 64U;
+    uint32_t j = bBits % 64U;
+    b2[i0] = b2[i0] | 1ULL << j;
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    m1 = 0xFFFFFFFFFFFFFFFFULL;
   }
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t m = m1 & m2;
@@ -1945,7 +1925,7 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t *aM = (uint64_t *)alloca(len * sizeof (uint64_t));
@@ -1953,9 +1933,9 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
     KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
     uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
     memset(c, 0U, (len + len) * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-    uint64_t *tmp0 = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+    KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+    uint64_t *tmp0 = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+    memset(tmp0, 0U, 4U * len * sizeof (uint64_t));
     Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c);
     Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM);
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
@@ -1969,13 +1949,13 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         bn_almost_mont_mul_u64(len, ctx_n0, mu, resM, aM, resM);
@@ -1996,31 +1976,31 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp0 = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp0 = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp0, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM);
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *resM = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(resM, 0U, len * sizeof (uint64_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *ctx = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(ctx, 0U, (len + len) * sizeof (uint64_t));
   memcpy(ctx, n, len * sizeof (uint64_t));
   memcpy(ctx + len, r2, len * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len);
-  uint64_t *table = (uint64_t *)alloca((uint32_t)16U * len * sizeof (uint64_t));
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len);
+  uint64_t *table = (uint64_t *)alloca(16U * len * sizeof (uint64_t));
+  memset(table, 0U, 16U * len * sizeof (uint64_t));
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *tmp = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(tmp, 0U, len * sizeof (uint64_t));
@@ -2031,21 +2011,21 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
   Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * len;
     uint64_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * len;
     uint64_t *ctx_n = ctx;
     bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
     uint32_t bits_l32 = (uint32_t)bits_c;
     const uint64_t *a_bits_l = table + bits_l32 * len;
     memcpy(resM, (uint64_t *)a_bits_l, len * sizeof (uint64_t));
@@ -2059,16 +2039,16 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *tmp1 = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(tmp1, 0U, len * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       bn_almost_mont_sqr_u64(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
     uint32_t bits_l32 = (uint32_t)bits_l;
     const uint64_t *a_bits_l = table + bits_l32 * len;
     memcpy(tmp1, (uint64_t *)a_bits_l, len * sizeof (uint64_t));
@@ -2094,7 +2074,7 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t *aM = (uint64_t *)alloca(len * sizeof (uint64_t));
@@ -2102,9 +2082,9 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
     KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
     uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
     memset(c, 0U, (len + len) * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-    uint64_t *tmp0 = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-    memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+    KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+    uint64_t *tmp0 = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+    memset(tmp0, 0U, 4U * len * sizeof (uint64_t));
     Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c);
     Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, aM);
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
@@ -2115,20 +2095,20 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
     memset(ctx, 0U, (len + len) * sizeof (uint64_t));
     memcpy(ctx, n, len * sizeof (uint64_t));
     memcpy(ctx + len, r2, len * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      for (uint32_t i = 0U; i < len; i++)
       {
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -2139,9 +2119,9 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
       sw = bit;
     }
     uint64_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len; i++)
+    for (uint32_t i = 0U; i < len; i++)
     {
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
@@ -2158,31 +2138,31 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *c0 = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(c0, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp0 = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp0, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp0 = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp0, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, r2, tmp0, c0);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c0, aM);
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *resM = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(resM, 0U, len * sizeof (uint64_t));
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *ctx = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(ctx, 0U, (len + len) * sizeof (uint64_t));
   memcpy(ctx, n, len * sizeof (uint64_t));
   memcpy(ctx + len, r2, len * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len);
-  uint64_t *table = (uint64_t *)alloca((uint32_t)16U * len * sizeof (uint64_t));
-  memset(table, 0U, (uint32_t)16U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len);
+  uint64_t *table = (uint64_t *)alloca(16U * len * sizeof (uint64_t));
+  memset(table, 0U, 16U * len * sizeof (uint64_t));
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *tmp = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(tmp, 0U, len * sizeof (uint64_t));
@@ -2193,29 +2173,29 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
   Hacl_Bignum_Montgomery_bn_from_mont_u64(len, ctx_n0, mu, ctx_r20, t0);
   memcpy(t1, aM, len * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * len;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * len;
     uint64_t *ctx_n1 = ctx;
     bn_almost_mont_sqr_u64(len, ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len, tmp, len * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len;
+    memcpy(table + (2U * i + 2U) * len, tmp, len * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * len;
     uint64_t *ctx_n = ctx;
     bn_almost_mont_mul_u64(len, ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len, tmp, len * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * len, tmp, len * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint64_t *)(table + (uint32_t)0U * len), len * sizeof (uint64_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+    memcpy(resM, (uint64_t *)(table + 0U * len), len * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
         uint64_t *os = resM;
         uint64_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -2231,24 +2211,24 @@ Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64(
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *tmp1 = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(tmp1, 0U, len * sizeof (uint64_t));
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       bn_almost_mont_sqr_u64(len, ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp1, (uint64_t *)(table + (uint32_t)0U * len), len * sizeof (uint64_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
+    memcpy(tmp1, (uint64_t *)(table + 0U * len), len * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len;
-      for (uint32_t i = (uint32_t)0U; i < len; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * len;
+      for (uint32_t i = 0U; i < len; i++)
       {
         uint64_t *os = tmp1;
         uint64_t x = (c & res_j[i]) | (~c & tmp1[i]);
diff --git a/src/msvc/Hacl_Bignum256.c b/src/msvc/Hacl_Bignum256.c
index b516e70d..71bb7747 100644
--- a/src/msvc/Hacl_Bignum256.c
+++ b/src/msvc/Hacl_Bignum256.c
@@ -60,23 +60,23 @@ Write `a + b mod 2^256` in `res`.
 */
 uint64_t Hacl_Bignum256_add(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   return c;
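
As the hunk header's doc comment says, Hacl_Bignum256_add writes a + b mod 2^256 into res and returns the carry; the operands are four 64-bit limbs, least-significant limb first. A small caller sketch, for illustration only (it assumes Hacl_Bignum256.h is on the include path):

#include <stdint.h>
#include <stdio.h>
#include "Hacl_Bignum256.h"

int main(void)
{
  /* a = 2^256 - 1, b = 1, limbs least-significant first */
  uint64_t a[4U] = {
    0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL,
    0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL
  };
  uint64_t b[4U] = { 1ULL, 0ULL, 0ULL, 0ULL };
  uint64_t res[4U] = { 0U };
  uint64_t carry = Hacl_Bignum256_add(a, b, res);
  /* (2^256 - 1) + 1 wraps to 0 and returns a carry of 1 */
  printf("carry = %llu, low limb = %llu\n",
         (unsigned long long)carry, (unsigned long long)res[0U]);
  return 0;
}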
@@ -91,23 +91,23 @@ Write `a - b mod 2^256` in `res`.
 */
 uint64_t Hacl_Bignum256_sub(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   return c;
@@ -125,52 +125,52 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum256_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
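
The final KRML_MAYBE_FOR4 above is the usual branchless select: `c2` is either all zeros or all ones, so `(c2 & res[i]) | (~c2 & tmp[i])` keeps either the unreduced sum or the subtracted value without a secret-dependent branch. A stand-alone illustration of the idiom (illustrative only, not code from this patch):

#include <stdint.h>

/* Pick x when mask is all ones, y when mask is all zeros (mask must be 0 or ~0). */
static inline uint64_t ct_select_u64(uint64_t mask, uint64_t x, uint64_t y)
{
  return (mask & x) | (~mask & y);
}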
@@ -188,53 +188,53 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum256_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x;);
@@ -248,30 +248,30 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum256_mul(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = b[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = a[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = a[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
     uint64_t r = c;
-    res[(uint32_t)4U + i0] = r;);
+    res[4U + i0] = r;);
 }
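
Caller-side sketch (hypothetical, not part of the patch): the product does not fit in four limbs, so the output must be an eight-limb (512-bit) buffer; `Hacl_Bignum256_sqr` below has the same output shape.

#include <stdint.h>
#include "Hacl_Bignum256.h"

static void demo_mul(uint64_t a[4U], uint64_t b[4U], uint64_t wide[8U])
{
  Hacl_Bignum256_mul(a, b, wide);   /* wide = a * b as eight little-endian 64-bit limbs */
}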
 
 /**
@@ -282,31 +282,31 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum256_sqr(uint64_t *a, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = a;
     uint64_t a_j = a[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -314,29 +314,29 @@ void Hacl_Bignum256_sqr(uint64_t *a, uint64_t *res)
     }
     uint64_t r = c;
     res[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
-  uint32_t i = nBits / (uint32_t)64U;
-  uint32_t j = nBits % (uint32_t)64U;
-  res[i] = res[i] | (uint64_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)512U - nBits; i0++)
+  memset(res, 0U, 4U * sizeof (uint64_t));
+  uint32_t i = nBits / 64U;
+  uint32_t j = nBits % 64U;
+  res[i] = res[i] | 1ULL << j;
+  for (uint32_t i0 = 0U; i0 < 512U - nBits; i0++)
   {
     Hacl_Bignum256_add_mod(n, res, res, res);
   }
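
For reference, `precompr2` builds the Montgomery constant R^2 mod n (R = 2^256) by seeding 2^nBits and then doubling modulo n until the exponent reaches 512. A toy single-word version of the same idea (illustrative only; it starts from 2^0 instead of the 2^nBits shortcut and assumes a hypothetical modulus below 2^63 so the doubling cannot overflow):

#include <stdint.h>

/* Compute 2^k mod n by repeated modular doubling (toy version, n < 2^63, n > 1). */
static uint64_t pow2_mod(uint32_t k, uint64_t n)
{
  uint64_t r = 1ULL % n;
  for (uint32_t i = 0U; i < k; i++)
  {
    r = r + r;                /* double ...            */
    if (r >= n) { r -= n; }   /* ... and reduce mod n  */
  }
  return r;
}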
@@ -344,61 +344,61 @@ static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 
 static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)4U + i0;
-    uint64_t res_j = c[(uint32_t)4U + i0];
+    uint64_t *resb = c + 4U + i0;
+    uint64_t res_j = c[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, c + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c1 = (uint64_t)0U;
+  uint64_t c1 = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t12, t2, res_i);
   }
   uint64_t c10 = c1;
   uint64_t c2 = c00 - c10;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -407,49 +407,49 @@ static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *
 static inline void from(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *a)
 {
   uint64_t tmp[8U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, aM, 4U * sizeof (uint64_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i);
     }
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)4U + i0;
-    uint64_t res_j = c[(uint32_t)4U + i0];
+    uint64_t *resb = c + 4U + i0;
+    uint64_t res_j = c[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, c + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
   uint64_t c1 = Hacl_Bignum256_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (m & tmp[i]) | (~m & res[i]);
     os[i] = x;);
@@ -459,61 +459,61 @@ static inline void
 amont_mul(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *bM, uint64_t *resM)
 {
   uint64_t c[8U] = { 0U };
-  memset(c, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(c, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = bM[i0];
     uint64_t *res_j = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     {
-      uint64_t a_i = aM[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = aM[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c1, res_i0);
-      uint64_t a_i0 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = aM[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c1, res_i1);
-      uint64_t a_i1 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = aM[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c1, res_i2);
-      uint64_t a_i2 = aM[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = aM[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c1, res_i);
     }
     uint64_t r = c1;
-    c[(uint32_t)4U + i0] = r;);
+    c[4U + i0] = r;);
   areduction(n, nInv_u64, c, resM);
 }
 
 static inline void amont_sqr(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *resM)
 {
   uint64_t c[8U] = { 0U };
-  memset(c, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(c, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = aM;
     uint64_t a_j = aM[i0];
     uint64_t *res_j = c + i0;
-    uint64_t c1 = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c1 = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c1, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c1, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c1, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c1, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -521,20 +521,20 @@ static inline void amont_sqr(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint6
     }
     uint64_t r = c1;
     c[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, c, c, c);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, c, c, c);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res = FStar_UInt128_mul_wide(aM[i], aM[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, c, tmp, c);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, c, tmp, c);
+  KRML_MAYBE_UNUSED_VAR(c1);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -543,44 +543,44 @@ bn_slow_precomp(uint64_t *n, uint64_t mu, uint64_t *r2, uint64_t *a, uint64_t *r
 {
   uint64_t a_mod[4U] = { 0U };
   uint64_t a1[8U] = { 0U };
-  memcpy(a1, a, (uint32_t)8U * sizeof (uint64_t));
-  uint64_t c0 = (uint64_t)0U;
+  memcpy(a1, a, 8U * sizeof (uint64_t));
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t qj = mu * a1[i0];
     uint64_t *res_j0 = a1 + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
     }
     uint64_t r = c;
     uint64_t c1 = r;
-    uint64_t *resb = a1 + (uint32_t)4U + i0;
-    uint64_t res_j = a1[(uint32_t)4U + i0];
+    uint64_t *resb = a1 + 4U + i0;
+    uint64_t res_j = a1[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););
-  memcpy(a_mod, a1 + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(a_mod, a1 + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
   uint64_t c1 = Hacl_Bignum256_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = a_mod;
     uint64_t x = (m & tmp[i]) | (~m & a_mod[i]);
     os[i] = x;);
@@ -603,23 +603,22 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum256_mod(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[4U] = { 0U };
-  memset(one, 0U, (uint32_t)4U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
+  memset(one, 0U, 4U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m1 = acc;
   uint64_t is_valid_m = m0 & m1;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t r2[4U] = { 0U };
     precompr2(nBits, n, r2);
@@ -628,68 +627,68 @@ bool Hacl_Bignum256_mod(uint64_t *n, uint64_t *a, uint64_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
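
A caller sketch (hypothetical, not from the patch): the input to reduce is a 512-bit (eight-limb) value, and the boolean result mirrors the precondition check visible above, i.e. `n` must be odd and greater than one.

#include <stdbool.h>
#include <stdint.h>
#include "Hacl_Bignum256.h"

static bool demo_mod(uint64_t n[4U], uint64_t a_wide[8U], uint64_t res[4U])
{
  /* Returns false (and leaves res zeroed) when n is even or n <= 1. */
  return Hacl_Bignum256_mod(n, a_wide, res);
}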
 
 static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b)
 {
   uint64_t one[4U] = { 0U };
-  memset(one, 0U, (uint32_t)4U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
+  memset(one, 0U, 4U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m10 = acc0;
   uint64_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t m1;
-  if (bBits < (uint32_t)64U * bLen)
+  if (bBits < 64U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
     uint64_t *b2 = (uint64_t *)alloca(bLen * sizeof (uint64_t));
     memset(b2, 0U, bLen * sizeof (uint64_t));
-    uint32_t i0 = bBits / (uint32_t)64U;
-    uint32_t j = bBits % (uint32_t)64U;
-    b2[i0] = b2[i0] | (uint64_t)1U << j;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 64U;
+    uint32_t j = bBits % 64U;
+    b2[i0] = b2[i0] | 1ULL << j;
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    m1 = 0xFFFFFFFFFFFFFFFFULL;
   }
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m2 = acc;
   uint64_t m = m1 & m2;
   return m00 & m;
@@ -706,7 +705,7 @@ exp_vartime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[4U] = { 0U };
     uint64_t c[8U] = { 0U };
@@ -714,18 +713,18 @@ exp_vartime_precomp(
     reduction(n, mu, c, aM);
     uint64_t resM[4U] = { 0U };
     uint64_t ctx[8U] = { 0U };
-    memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
+    memcpy(ctx, n, 4U * sizeof (uint64_t));
+    memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -734,7 +733,7 @@ exp_vartime_precomp(
       amont_sqr(ctx_n0, mu, aM, aM);
     }
     uint64_t tmp[8U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)4U * sizeof (uint64_t));
+    memcpy(tmp, resM, 4U * sizeof (uint64_t));
     reduction(n, mu, tmp, res);
     return;
   }
@@ -744,74 +743,70 @@ exp_vartime_precomp(
   reduction(n, mu, c, aM);
   uint64_t resM[4U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[8U] = { 0U };
-  memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(ctx, n, 4U * sizeof (uint64_t));
+  memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
   uint64_t table[64U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)4U;
+  uint64_t *t1 = table + 4U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)4U;
+  uint64_t *ctx_r20 = ctx + 4U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(t1, aM, 4U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)4U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 4U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U;
+    memcpy(table + (2U * i + 2U) * 4U, tmp, 4U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 4U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 4U, tmp, 4U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
     uint32_t bits_l32 = (uint32_t)bits_c;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)4U;
-    memcpy(resM, (uint64_t *)a_bits_l, (uint32_t)4U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 4U;
+    memcpy(resM, (uint64_t *)a_bits_l, 4U * sizeof (uint64_t));
   }
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[4U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
     uint32_t bits_l32 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)4U;
-    memcpy(tmp0, (uint64_t *)a_bits_l, (uint32_t)4U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 4U;
+    memcpy(tmp0, (uint64_t *)a_bits_l, 4U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint64_t tmp1[8U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp1, resM, 4U * sizeof (uint64_t));
   reduction(n, mu, tmp1, res);
 }
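
The large-exponent path above is a fixed 4-bit window: 16 precomputed powers a^0 .. a^15 in Montgomery form (the 64-limb table), then, for every 4-bit window of the exponent, four squarings followed by one table multiplication. The same structure in a scalar sketch (illustrative only; it drops the Montgomery representation, assumes a 64-bit exponent, and uses a hypothetical `mulmod` helper built on the GCC/Clang unsigned __int128 extension):

#include <stdint.h>

/* Single-word modular multiply standing in for amont_mul / amont_sqr (n > 1). */
static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t n)
{
  return (uint64_t)((unsigned __int128)a * b % n);
}

/* 4-bit fixed-window exponentiation, scanning the exponent from the top. */
static uint64_t pow_window4(uint64_t a, uint64_t e, uint64_t n)
{
  uint64_t table[16];
  table[0] = 1ULL % n;
  for (int i = 1; i < 16; i++) { table[i] = mulmod(table[i - 1], a, n); }
  uint64_t r = 1ULL % n;
  for (int i = 60; i >= 0; i -= 4)
  {
    for (int s = 0; s < 4; s++) { r = mulmod(r, r, n); }   /* four squarings       */
    r = mulmod(r, table[(e >> i) & 0xFU], n);              /* one window multiply  */
  }
  return r;
}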
 
@@ -826,7 +821,7 @@ exp_consttime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[4U] = { 0U };
     uint64_t c[8U] = { 0U };
@@ -834,24 +829,24 @@ exp_consttime_precomp(
     reduction(n, mu, c, aM);
     uint64_t resM[4U] = { 0U };
     uint64_t ctx[8U] = { 0U };
-    memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    memcpy(ctx, n, 4U * sizeof (uint64_t));
+    memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]);
+        0U,
+        4U,
+        1U,
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;);
       uint64_t *ctx_n0 = ctx;
@@ -862,14 +857,14 @@ exp_consttime_precomp(
     }
     uint64_t sw0 = sw;
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]);
+      0U,
+      4U,
+      1U,
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;);
     uint64_t tmp[8U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)4U * sizeof (uint64_t));
+    memcpy(tmp, resM, 4U * sizeof (uint64_t));
     reduction(n, mu, tmp, res);
     return;
   }
@@ -879,56 +874,52 @@ exp_consttime_precomp(
   reduction(n, mu, c0, aM);
   uint64_t resM[4U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[8U] = { 0U };
-  memcpy(ctx, n, (uint32_t)4U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)4U, r2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(ctx, n, 4U * sizeof (uint64_t));
+  memcpy(ctx + 4U, r2, 4U * sizeof (uint64_t));
   uint64_t table[64U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)4U;
+  uint64_t *t1 = table + 4U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)4U;
+  uint64_t *ctx_r20 = ctx + 4U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(t1, aM, 4U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)4U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 4U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)4U;
+    memcpy(table + (2U * i + 2U) * 4U, tmp, 4U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 4U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)4U,
-      tmp,
-      (uint32_t)4U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 4U, tmp, 4U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint64_t *)table, (uint32_t)4U * sizeof (uint64_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+    memcpy(resM, (uint64_t *)table, 4U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)4U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 4U;
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *os = resM;
         uint64_t x = (c & res_j[i]) | (~c & resM[i]);
         os[i] = x;););
@@ -936,31 +927,31 @@ exp_consttime_precomp(
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)4U;
+    uint64_t *ctx_r2 = ctx + 4U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[4U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)4U * sizeof (uint64_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 4U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)4U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 4U;
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
@@ -968,7 +959,7 @@ exp_consttime_precomp(
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint64_t tmp1[8U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp1, resM, 4U * sizeof (uint64_t));
   reduction(n, mu, tmp1, res);
 }
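
The constant-time path differs from the variable-time one only in how the table entry is fetched: instead of indexing with the secret window value, it touches all 16 entries and keeps the right one with an equality mask, so the memory access pattern is independent of the exponent. A scalar illustration of that lookup (illustrative only; `eq_mask_u64` stands in for FStar_UInt64_eq_mask):

#include <stdint.h>

/* All ones when a == b, all zeros otherwise, with no branch. */
static uint64_t eq_mask_u64(uint64_t a, uint64_t b)
{
  uint64_t x = a ^ b;                    /* zero iff equal                      */
  uint64_t minus_x = ~x + 1ULL;          /* two's complement of x               */
  uint64_t xnx = ~(x | minus_x);         /* top bit set iff x == 0              */
  return (uint64_t)((int64_t)xnx >> 63); /* arithmetic shift spreads the bit    */
}

/* Select table[idx] (0 <= idx < 16) without a secret-dependent memory access. */
static uint64_t ct_table_lookup(const uint64_t table[16], uint64_t idx)
{
  uint64_t r = table[0];
  for (uint64_t i = 1U; i < 16U; i++)
  {
    uint64_t m = eq_mask_u64(idx, i);
    r = (m & table[i]) | (~m & r);
  }
  return r;
}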
 
@@ -1034,17 +1025,16 @@ Hacl_Bignum256_mod_exp_vartime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -1077,17 +1067,16 @@ Hacl_Bignum256_mod_exp_consttime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -1108,67 +1097,66 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum256_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[4U] = { 0U };
-  memset(one, 0U, (uint32_t)4U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
+  memset(one, 0U, 4U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m1 = acc0;
   uint64_t m00 = m0 & m1;
   uint64_t bn_zero[4U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
   uint64_t res10 = mask1;
   uint64_t m10 = res10;
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t m2 = acc;
   uint64_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t n2[4U] = { 0U };
-    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2);
-    uint64_t *a1 = n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2);
+    uint64_t *a1 = n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
     KRML_MAYBE_FOR3(i,
-      (uint32_t)0U,
-      (uint32_t)3U,
-      (uint32_t)1U,
+      0U,
+      3U,
+      1U,
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
     uint64_t c1 = c;
     uint64_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)256U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 256U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)4U * sizeof (uint64_t));
+    memset(res, 0U, 4U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
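
As the body above shows, the inverse is computed as a^(n - 2) mod n, so the result is only meaningful when n is prime. A hypothetical caller (not part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include "Hacl_Bignum256.h"

static bool demo_inverse(uint64_t prime_n[4U], uint64_t a[4U], uint64_t inv[4U])
{
  /* Returns false (and leaves inv zeroed) when n is even, n <= 1, a is zero or
     a >= n; correctness of the result additionally requires n to be prime. */
  return Hacl_Bignum256_mod_inv_prime_vartime(prime_n, a, inv);
}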
 
 
@@ -1192,17 +1180,15 @@ Heap-allocate and initialize a Montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_Bignum256_mont_ctx_init(uint64_t *n)
 {
-  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint64_t));
-  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint64_t));
+  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC(4U, sizeof (uint64_t));
+  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC(4U, sizeof (uint64_t));
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)4U * sizeof (uint64_t));
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)4U, n);
+  memcpy(n11, n, 4U * sizeof (uint64_t));
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(4U, n);
   precompr2(nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
-  res = { .len = (uint32_t)4U, .n = n11, .mu = mu, .r2 = r21 };
+  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = 4U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *)KRML_HOST_MALLOC(sizeof (
@@ -1330,21 +1316,21 @@ Hacl_Bignum256_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k;
   uint64_t n2[4U] = { 0U };
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
-  uint64_t *a1 = k1.n + (uint32_t)1U;
-  uint64_t *res1 = n2 + (uint32_t)1U;
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
+  uint64_t *a1 = k1.n + 1U;
+  uint64_t *res1 = n2 + 1U;
   uint64_t c = c0;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
+    0U,
+    3U,
+    1U,
     uint64_t t1 = a1[i];
     uint64_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
   uint64_t c1 = c;
   uint64_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)256U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 256U, n2, res);
 }
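
When several operations share one modulus, the Montgomery context amortises the `precompr2` work. A workflow sketch (the `_precomp` call shape is taken from the function above; the `Hacl_Bignum256_mont_ctx_free` name is assumed from the corresponding header and is not shown in this patch):

#include <stdint.h>
#include "Hacl_Bignum256.h"

static void demo_precomp(uint64_t prime_n[4U], uint64_t a[4U], uint64_t inv[4U])
{
  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *k = Hacl_Bignum256_mont_ctx_init(prime_n);
  /* Reuse k for as many modular operations over prime_n as needed. */
  Hacl_Bignum256_mod_inv_prime_vartime_precomp(k, a, inv);
  Hacl_Bignum256_mont_ctx_free(k);   /* assumed deallocation routine */
}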
 
 
@@ -1366,36 +1352,28 @@ Load a big-endian bignum from memory.
@@ -1366,36 +1352,28 @@ Load a big-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum256_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res2;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -1415,36 +1393,28 @@ Load a little-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum256_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++)
   {
     uint64_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)8U;
+    uint8_t *bj = tmp + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r1 = u;
     uint64_t x = r1;
@@ -1462,12 +1432,8 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum256_bn_to_bytes_be(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(res + i * (uint32_t)8U, b[(uint32_t)4U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(res + i * 8U, b[4U - i - 1U]););
 }
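
A round-trip sketch (hypothetical caller; it assumes the default KRML_HOST_CALLOC is plain calloc, so free() releases the result): loading exactly 32 big-endian bytes yields the four-limb shape expected by the other Hacl_Bignum256 functions and by the serializer above.

#include <stdint.h>
#include <stdlib.h>
#include "Hacl_Bignum256.h"

static void demo_bytes_roundtrip(uint8_t be_bytes[32U])
{
  uint64_t *bn = Hacl_Bignum256_new_bn_from_bytes_be(32U, be_bytes);  /* 4 limbs, or NULL */
  if (bn == NULL) { return; }
  uint8_t out[32U] = { 0U };
  Hacl_Bignum256_bn_to_bytes_be(bn, out);   /* out now equals be_bytes */
  free(bn);                                 /* assumes KRML_HOST_CALLOC is calloc */
}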
 
 /**
@@ -1479,12 +1445,8 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum256_bn_to_bytes_le(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(res + i * (uint32_t)8U, b[i]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(res + i * 8U, b[i]););
 }
 
 
@@ -1500,14 +1462,14 @@ Returns 2^64 - 1 if a < b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum256_lt_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   return acc;
 }
 
@@ -1518,11 +1480,11 @@ Returns 2^64 - 1 if a = b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum256_eq_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
diff --git a/src/msvc/Hacl_Bignum256_32.c b/src/msvc/Hacl_Bignum256_32.c
index 1c8ce59b..e0a89697 100644
--- a/src/msvc/Hacl_Bignum256_32.c
+++ b/src/msvc/Hacl_Bignum256_32.c
@@ -60,26 +60,26 @@ Write `a + b mod 2^256` in `res`.
 */
 uint32_t Hacl_Bignum256_32_add(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i););
   return c;
 }
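
A small usage sketch for the carry-out (hypothetical values; assumes the 8-limb
little-endian layout used throughout this file):

    /* (2^256 - 1) + 1 wraps to 0 and reports the overflow in the return value. */
    static uint32_t add_wraps_example(void)
    {
      uint32_t a[8U], one[8U] = { 1U }, r[8U] = { 0U };
      for (uint32_t i = 0U; i < 8U; i++) { a[i] = 0xFFFFFFFFU; }
      return Hacl_Bignum256_32_add(a, one, r);     /* returns 1, r is all zeros */
    }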
@@ -93,26 +93,26 @@ Write `a - b mod 2^256` in `res`.
 */
 uint32_t Hacl_Bignum256_32_sub(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i););
   return c;
 }
@@ -129,56 +129,56 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum256_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t12, t2, res_i););
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i););
   uint32_t c1 = c;
   uint32_t c2 = c00 - c1;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -196,57 +196,57 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum256_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t12, t2, res_i););
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
-  uint32_t c = (uint32_t)0U;
+  uint32_t c = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i););
   uint32_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint32_t c2 = (uint32_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t c2 = 0U - c00;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x;);
@@ -260,32 +260,32 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum256_32_mul(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)16U * sizeof (uint32_t));
+  memset(res, 0U, 16U * sizeof (uint32_t));
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t bj = b[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
+    uint32_t c = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = a[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = a[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c, res_i0);
-      uint32_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = a[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c, res_i1);
-      uint32_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = a[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c, res_i2);
-      uint32_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = a[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c, res_i););
     uint32_t r = c;
-    res[(uint32_t)8U + i0] = r;);
+    res[8U + i0] = r;);
 }
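
Note that the product needs twice the space: res in Hacl_Bignum256_32_mul (and in
sqr below) is 16 limbs, i.e. 512 bits, as the memset above makes explicit. A
minimal sketch with hypothetical operands:

    static void mul_example(void)
    {
      uint32_t x[8U] = { 3U };             /* x = 3, remaining limbs zero */
      uint32_t y[8U] = { 5U };             /* y = 5 */
      uint32_t wide[16U] = { 0U };         /* full 512-bit product */
      Hacl_Bignum256_32_mul(x, y, wide);   /* wide[0] == 15, all other limbs 0 */
    }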
 
 /**
@@ -296,31 +296,31 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum256_32_sqr(uint32_t *a, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)16U * sizeof (uint32_t));
+  memset(res, 0U, 16U * sizeof (uint32_t));
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *ab = a;
     uint32_t a_j = a[i0];
     uint32_t *res_j = res + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint32_t a_i = ab[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = ab[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c, res_i0);
-      uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = ab[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c, res_i1);
-      uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = ab[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c, res_i2);
-      uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = ab[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint32_t a_i = ab[i];
       uint32_t *res_i = res_j + i;
@@ -328,29 +328,29 @@ void Hacl_Bignum256_32_sqr(uint32_t *a, uint32_t *res)
     }
     uint32_t r = c;
     res[i0 + i0] = r;);
-  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint32_t tmp[16U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t res1 = (uint64_t)a[i] * (uint64_t)a[i];
-    uint32_t hi = (uint32_t)(res1 >> (uint32_t)32U);
+    uint32_t hi = (uint32_t)(res1 >> 32U);
     uint32_t lo = (uint32_t)res1;
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
-  uint32_t i = nBits / (uint32_t)32U;
-  uint32_t j = nBits % (uint32_t)32U;
-  res[i] = res[i] | (uint32_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)512U - nBits; i0++)
+  memset(res, 0U, 8U * sizeof (uint32_t));
+  uint32_t i = nBits / 32U;
+  uint32_t j = nBits % 32U;
+  res[i] = res[i] | 1U << j;
+  for (uint32_t i0 = 0U; i0 < 512U - nBits; i0++)
   {
     Hacl_Bignum256_32_add_mod(n, res, res, res);
   }
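
In math terms, precompr2 computes the Montgomery constant R^2 mod n for R = 2^256:
it seeds res with 2^nBits (at most n when nBits is derived from n's top limb, as in
the callers below) and then performs 512 - nBits modular doublings:

    /* res = 2^nBits * 2^(512 - nBits) mod n = 2^512 mod n = R^2 mod n,  R = 2^256 */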
@@ -358,65 +358,65 @@ static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 
 static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
+    uint32_t c1 = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i););
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)8U + i0;
-    uint32_t res_j = c[(uint32_t)8U + i0];
+    uint32_t *resb = c + 8U + i0;
+    uint32_t res_j = c[8U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(res, c + 8U, 8U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
-  uint32_t c1 = (uint32_t)0U;
+  uint32_t c1 = 0U;
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    2U,
+    1U,
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t12, t2, res_i););
   uint32_t c10 = c1;
   uint32_t c2 = c00 - c10;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -425,51 +425,51 @@ static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *
 static inline void from(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *a)
 {
   uint32_t tmp[16U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp, aM, 8U * sizeof (uint32_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
+    uint32_t c1 = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i););
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)8U + i0;
-    uint32_t res_j = c[(uint32_t)8U + i0];
+    uint32_t *resb = c + 8U + i0;
+    uint32_t res_j = c[8U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb););
-  memcpy(res, c + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(res, c + 8U, 8U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
   uint32_t c1 = Hacl_Bignum256_32_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = res;
     uint32_t x = (m & tmp[i]) | (~m & res[i]);
     os[i] = x;);
@@ -479,63 +479,63 @@ static inline void
 amont_mul(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *bM, uint32_t *resM)
 {
   uint32_t c[16U] = { 0U };
-  memset(c, 0U, (uint32_t)16U * sizeof (uint32_t));
+  memset(c, 0U, 16U * sizeof (uint32_t));
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t bj = bM[i0];
     uint32_t *res_j = c + i0;
-    uint32_t c1 = (uint32_t)0U;
+    uint32_t c1 = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = aM[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = aM[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, bj, c1, res_i0);
-      uint32_t a_i0 = aM[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = aM[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, bj, c1, res_i1);
-      uint32_t a_i1 = aM[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = aM[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, bj, c1, res_i2);
-      uint32_t a_i2 = aM[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = aM[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, bj, c1, res_i););
     uint32_t r = c1;
-    c[(uint32_t)8U + i0] = r;);
+    c[8U + i0] = r;);
   areduction(n, nInv_u64, c, resM);
 }
 
 static inline void amont_sqr(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *resM)
 {
   uint32_t c[16U] = { 0U };
-  memset(c, 0U, (uint32_t)16U * sizeof (uint32_t));
+  memset(c, 0U, 16U * sizeof (uint32_t));
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *ab = aM;
     uint32_t a_j = aM[i0];
     uint32_t *res_j = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint32_t a_i = ab[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint32_t a_i = ab[4U * i];
+      uint32_t *res_i0 = res_j + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, a_j, c1, res_i0);
-      uint32_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = ab[4U * i + 1U];
+      uint32_t *res_i1 = res_j + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, a_j, c1, res_i1);
-      uint32_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = ab[4U * i + 2U];
+      uint32_t *res_i2 = res_j + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, a_j, c1, res_i2);
-      uint32_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = ab[4U * i + 3U];
+      uint32_t *res_i = res_j + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, a_j, c1, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint32_t a_i = ab[i];
       uint32_t *res_i = res_j + i;
@@ -543,20 +543,20 @@ static inline void amont_sqr(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint3
     }
     uint32_t r = c1;
     c[i0 + i0] = r;);
-  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, c, c, c);
-  KRML_HOST_IGNORE(c0);
+  uint32_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, c, c, c);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint32_t tmp[16U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t res = (uint64_t)aM[i] * (uint64_t)aM[i];
-    uint32_t hi = (uint32_t)(res >> (uint32_t)32U);
+    uint32_t hi = (uint32_t)(res >> 32U);
     uint32_t lo = (uint32_t)res;
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32((uint32_t)16U, c, tmp, c);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint32_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u32(16U, c, tmp, c);
+  KRML_MAYBE_UNUSED_VAR(c1);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -565,46 +565,46 @@ bn_slow_precomp(uint32_t *n, uint32_t mu, uint32_t *r2, uint32_t *a, uint32_t *r
 {
   uint32_t a_mod[8U] = { 0U };
   uint32_t a1[16U] = { 0U };
-  memcpy(a1, a, (uint32_t)16U * sizeof (uint32_t));
-  uint32_t c0 = (uint32_t)0U;
+  memcpy(a1, a, 16U * sizeof (uint32_t));
+  uint32_t c0 = 0U;
   KRML_MAYBE_FOR8(i0,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t qj = mu * a1[i0];
     uint32_t *res_j0 = a1 + i0;
-    uint32_t c = (uint32_t)0U;
+    uint32_t c = 0U;
     KRML_MAYBE_FOR2(i,
-      (uint32_t)0U,
-      (uint32_t)2U,
-      (uint32_t)1U,
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      2U,
+      1U,
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i););
     uint32_t r = c;
     uint32_t c1 = r;
-    uint32_t *resb = a1 + (uint32_t)8U + i0;
-    uint32_t res_j = a1[(uint32_t)8U + i0];
+    uint32_t *resb = a1 + 8U + i0;
+    uint32_t res_j = a1[8U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c1, res_j, resb););
-  memcpy(a_mod, a1 + (uint32_t)8U, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(a_mod, a1 + 8U, 8U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[8U] = { 0U };
   uint32_t c1 = Hacl_Bignum256_32_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = a_mod;
     uint32_t x = (m & tmp[i]) | (~m & a_mod[i]);
     os[i] = x;);
@@ -627,22 +627,22 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum256_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[8U] = { 0U };
-  memset(one, 0U, (uint32_t)8U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
+  memset(one, 0U, 8U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m1 = acc;
   uint32_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t r2[8U] = { 0U };
     precompr2(nBits, n, r2);
@@ -651,68 +651,68 @@ bool Hacl_Bignum256_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
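
A sketch tying the two halves together: the 16-limb input expected by
Hacl_Bignum256_32_mod is exactly the shape Hacl_Bignum256_32_mul produces, and the
call reports false when n is even or n <= 1 (assumes the distribution's
Hacl_Bignum256_32.h header):

    #include <stdbool.h>
    #include "Hacl_Bignum256_32.h"

    /* r = (x * y) mod n for an odd 256-bit modulus n; x, y, n, r are 8 limbs. */
    static bool mulmod_example(uint32_t *x, uint32_t *y, uint32_t *n, uint32_t *r)
    {
      uint32_t wide[16U] = { 0U };
      Hacl_Bignum256_32_mul(x, y, wide);          /* 512-bit product */
      return Hacl_Bignum256_32_mod(n, wide, r);   /* false if n even or n <= 1 */
    }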
 
 static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b)
 {
   uint32_t one[8U] = { 0U };
-  memset(one, 0U, (uint32_t)8U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
+  memset(one, 0U, 8U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m10 = acc0;
   uint32_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t m1;
-  if (bBits < (uint32_t)32U * bLen)
+  if (bBits < 32U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), bLen);
     uint32_t *b2 = (uint32_t *)alloca(bLen * sizeof (uint32_t));
     memset(b2, 0U, bLen * sizeof (uint32_t));
-    uint32_t i0 = bBits / (uint32_t)32U;
-    uint32_t j = bBits % (uint32_t)32U;
-    b2[i0] = b2[i0] | (uint32_t)1U << j;
-    uint32_t acc = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 32U;
+    uint32_t j = bBits % 32U;
+    b2[i0] = b2[i0] | 1U << j;
+    uint32_t acc = 0U;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]);
       uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
     }
     uint32_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint32_t)0xFFFFFFFFU;
+    m1 = 0xFFFFFFFFU;
   }
-  uint32_t acc = (uint32_t)0U;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m2 = acc;
   uint32_t m = m1 & m2;
   return m00 & m;
@@ -729,7 +729,7 @@ exp_vartime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[8U] = { 0U };
     uint32_t c[16U] = { 0U };
@@ -737,18 +737,18 @@ exp_vartime_precomp(
     reduction(n, mu, c, aM);
     uint32_t resM[8U] = { 0U };
     uint32_t ctx[16U] = { 0U };
-    memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(ctx, n, 8U * sizeof (uint32_t));
+    memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -757,7 +757,7 @@ exp_vartime_precomp(
       amont_sqr(ctx_n0, mu, aM, aM);
     }
     uint32_t tmp[16U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(tmp, resM, 8U * sizeof (uint32_t));
     reduction(n, mu, tmp, res);
     return;
   }
@@ -767,74 +767,70 @@ exp_vartime_precomp(
   reduction(n, mu, c, aM);
   uint32_t resM[8U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[16U] = { 0U };
-  memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(ctx, n, 8U * sizeof (uint32_t));
+  memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
   uint32_t table[128U] = { 0U };
   uint32_t tmp[8U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)8U;
+  uint32_t *t1 = table + 8U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)8U;
+  uint32_t *ctx_r20 = ctx + 8U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(t1, aM, 8U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)8U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 8U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U;
+    memcpy(table + (2U * i + 2U) * 8U, tmp, 8U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 8U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 8U, tmp, 8U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
     uint32_t bits_l32 = bits_c;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)8U;
-    memcpy(resM, (uint32_t *)a_bits_l, (uint32_t)8U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 8U;
+    memcpy(resM, (uint32_t *)a_bits_l, 8U * sizeof (uint32_t));
   }
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[8U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
     uint32_t bits_l32 = bits_l;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)8U;
-    memcpy(tmp0, (uint32_t *)a_bits_l, (uint32_t)8U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 8U;
+    memcpy(tmp0, (uint32_t *)a_bits_l, 8U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint32_t tmp1[16U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp1, resM, 8U * sizeof (uint32_t));
   reduction(n, mu, tmp1, res);
 }
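
The 128-word table above is a standard 4-bit fixed window: 16 entries of 8 limbs,
kept in Montgomery form. Writing aM for the Montgomery form of a, the build loop
establishes

    /* table[0] = 1 (Montgomery one), table[1] = aM,
       table[2*i + 2] = table[i + 1]^2, table[2*i + 3] = aM * table[2*i + 2],
       hence table[j] = aM^j for j = 0 .. 15. */

and the main loop then consumes the exponent four bits at a time: four squarings
followed by one multiplication with the entry selected by those bits.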
 
@@ -849,7 +845,7 @@ exp_consttime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[8U] = { 0U };
     uint32_t c[16U] = { 0U };
@@ -857,24 +853,24 @@ exp_consttime_precomp(
     reduction(n, mu, c, aM);
     uint32_t resM[8U] = { 0U };
     uint32_t ctx[16U] = { 0U };
-    memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    memcpy(ctx, n, 8U * sizeof (uint32_t));
+    memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
       KRML_MAYBE_FOR8(i,
-        (uint32_t)0U,
-        (uint32_t)8U,
-        (uint32_t)1U,
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]);
+        0U,
+        8U,
+        1U,
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;);
       uint32_t *ctx_n0 = ctx;
@@ -885,14 +881,14 @@ exp_consttime_precomp(
     }
     uint32_t sw0 = sw;
     KRML_MAYBE_FOR8(i,
-      (uint32_t)0U,
-      (uint32_t)8U,
-      (uint32_t)1U,
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]);
+      0U,
+      8U,
+      1U,
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;);
     uint32_t tmp[16U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)8U * sizeof (uint32_t));
+    memcpy(tmp, resM, 8U * sizeof (uint32_t));
     reduction(n, mu, tmp, res);
     return;
   }
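
The XOR-and-mask lines above are a branch-free conditional swap; the same idiom in
isolation, as a small sketch:

    /* Swap *x and *y exactly when sw == 1, with no data-dependent branch. */
    static inline void cswap_u32(uint32_t sw, uint32_t *x, uint32_t *y)
    {
      uint32_t mask = 0U - sw;          /* 0x00000000 or 0xFFFFFFFF */
      uint32_t d = mask & (*x ^ *y);
      *x = *x ^ d;
      *y = *y ^ d;
    }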
@@ -902,56 +898,52 @@ exp_consttime_precomp(
   reduction(n, mu, c0, aM);
   uint32_t resM[8U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[16U] = { 0U };
-  memcpy(ctx, n, (uint32_t)8U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)8U, r2, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(ctx, n, 8U * sizeof (uint32_t));
+  memcpy(ctx + 8U, r2, 8U * sizeof (uint32_t));
   uint32_t table[128U] = { 0U };
   uint32_t tmp[8U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)8U;
+  uint32_t *t1 = table + 8U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)8U;
+  uint32_t *ctx_r20 = ctx + 8U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(t1, aM, 8U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)8U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 8U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)8U;
+    memcpy(table + (2U * i + 2U) * 8U, tmp, 8U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 8U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)8U,
-      tmp,
-      (uint32_t)8U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 8U, tmp, 8U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint32_t *)table, (uint32_t)8U * sizeof (uint32_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+    memcpy(resM, (uint32_t *)table, 8U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)8U;
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 8U;
       KRML_MAYBE_FOR8(i,
-        (uint32_t)0U,
-        (uint32_t)8U,
-        (uint32_t)1U,
+        0U,
+        8U,
+        1U,
         uint32_t *os = resM;
         uint32_t x = (c & res_j[i]) | (~c & resM[i]);
         os[i] = x;););
@@ -959,31 +951,31 @@ exp_consttime_precomp(
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)8U;
+    uint32_t *ctx_r2 = ctx + 8U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[8U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint32_t *)table, (uint32_t)8U * sizeof (uint32_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
+    memcpy(tmp0, (uint32_t *)table, 8U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)8U;
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 8U;
       KRML_MAYBE_FOR8(i,
-        (uint32_t)0U,
-        (uint32_t)8U,
-        (uint32_t)1U,
+        0U,
+        8U,
+        1U,
         uint32_t *os = tmp0;
         uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
@@ -991,7 +983,7 @@ exp_consttime_precomp(
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint32_t tmp1[16U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp1, resM, 8U * sizeof (uint32_t));
   reduction(n, mu, tmp1, res);
 }
 
@@ -1057,16 +1049,16 @@ Hacl_Bignum256_32_mod_exp_vartime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -1099,16 +1091,16 @@ Hacl_Bignum256_32_mod_exp_consttime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -1129,80 +1121,80 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum256_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[8U] = { 0U };
-  memset(one, 0U, (uint32_t)8U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
+  memset(one, 0U, 8U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m1 = acc0;
   uint32_t m00 = m0 & m1;
   uint32_t bn_zero[8U] = { 0U };
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
+  uint32_t mask = 0xFFFFFFFFU;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint32_t mask1 = mask;
   uint32_t res10 = mask1;
   uint32_t m10 = res10;
-  uint32_t acc = (uint32_t)0U;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   uint32_t m2 = acc;
   uint32_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t n2[8U] = { 0U };
-    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2);
-    uint32_t *a1 = n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2);
+    uint32_t *a1 = n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
     {
-      uint32_t t1 = a1[(uint32_t)4U * (uint32_t)0U];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * (uint32_t)0U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * 0U];
+      uint32_t *res_i0 = res1 + 4U * 0U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * 0U + 1U];
+      uint32_t *res_i1 = res1 + 4U * 0U + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * 0U + 2U];
+      uint32_t *res_i2 = res1 + 4U * 0U + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * 0U + 3U];
+      uint32_t *res_i = res1 + 4U * 0U + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
     KRML_MAYBE_FOR3(i,
-      (uint32_t)4U,
-      (uint32_t)7U,
-      (uint32_t)1U,
+      4U,
+      7U,
+      1U,
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
     uint32_t c1 = c;
     uint32_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)256U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 256U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)8U * sizeof (uint32_t));
+    memset(res, 0U, 8U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 
@@ -1226,16 +1218,15 @@ Heap-allocate and initialize a Montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum256_32_mont_ctx_init(uint32_t *n)
 {
-  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
-  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
+  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)8U * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)8U, n);
+  memcpy(n11, n, 8U * sizeof (uint32_t));
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(8U, n);
   precompr2(nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
-  res = { .len = (uint32_t)8U, .n = n11, .mu = mu, .r2 = r21 };
+  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res = { .len = 8U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *)KRML_HOST_MALLOC(sizeof (
@@ -1363,35 +1354,35 @@ Hacl_Bignum256_32_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k;
   uint32_t n2[8U] = { 0U };
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
-  uint32_t *a1 = k1.n + (uint32_t)1U;
-  uint32_t *res1 = n2 + (uint32_t)1U;
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
+  uint32_t *a1 = k1.n + 1U;
+  uint32_t *res1 = n2 + 1U;
   uint32_t c = c0;
   {
-    uint32_t t1 = a1[(uint32_t)4U * (uint32_t)0U];
-    uint32_t *res_i0 = res1 + (uint32_t)4U * (uint32_t)0U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-    uint32_t t10 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint32_t *res_i1 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-    uint32_t t11 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint32_t *res_i2 = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-    uint32_t t12 = a1[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint32_t *res_i = res1 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+    uint32_t t1 = a1[4U * 0U];
+    uint32_t *res_i0 = res1 + 4U * 0U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+    uint32_t t10 = a1[4U * 0U + 1U];
+    uint32_t *res_i1 = res1 + 4U * 0U + 1U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+    uint32_t t11 = a1[4U * 0U + 2U];
+    uint32_t *res_i2 = res1 + 4U * 0U + 2U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+    uint32_t t12 = a1[4U * 0U + 3U];
+    uint32_t *res_i = res1 + 4U * 0U + 3U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
   }
   KRML_MAYBE_FOR3(i,
-    (uint32_t)4U,
-    (uint32_t)7U,
-    (uint32_t)1U,
+    4U,
+    7U,
+    1U,
     uint32_t t1 = a1[i];
     uint32_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
   uint32_t c1 = c;
   uint32_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)256U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 256U, n2, res);
 }
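
A usage sketch for the precomputed-context path (the context returned by
mont_ctx_init is heap-allocated; releasing it is assumed to go through a matching
mont_ctx_free entry point, which is not part of this hunk):

    /* One-time setup, then a precomputed modular inversion.  n must be an odd
       prime for the Fermat-based inversion above to be meaningful. */
    static void inv_with_ctx(uint32_t *n, uint32_t *a, uint32_t *res)
    {
      Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *k = Hacl_Bignum256_32_mont_ctx_init(n);
      Hacl_Bignum256_32_mod_inv_prime_vartime_precomp(k, a, res);
      /* reuse k for further *_precomp calls, then release it via mont_ctx_free */
    }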
 
 
@@ -1413,36 +1404,28 @@ Load a big-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint32_t *os = res2;
-    uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U);
+    uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U);
     uint32_t x = u;
     os[i] = x;
   }
@@ -1462,36 +1445,28 @@ Load a little-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum256_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++)
   {
     uint32_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)4U;
+    uint8_t *bj = tmp + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r1 = u;
     uint32_t x = r1;
@@ -1509,12 +1484,8 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum256_32_bn_to_bytes_be(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_be(res + i * (uint32_t)4U, b[(uint32_t)8U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(res + i * 4U, b[8U - i - 1U]););
 }
 
 /**
@@ -1526,12 +1497,8 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum256_32_bn_to_bytes_le(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_le(res + i * (uint32_t)4U, b[i]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_le(res + i * 4U, b[i]););
 }
 
 
@@ -1547,14 +1514,14 @@ Returns 2^32 - 1 if a < b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum256_32_lt_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t acc = (uint32_t)0U;
+  uint32_t acc = 0U;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U))););
   return acc;
 }
 
@@ -1565,11 +1532,11 @@ Returns 2^32 - 1 if a = b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum256_32_eq_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
+  uint32_t mask = 0xFFFFFFFFU;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;);
   uint32_t mask1 = mask;
diff --git a/src/msvc/Hacl_Bignum32.c b/src/msvc/Hacl_Bignum32.c
index f719a08e..df140b17 100644
--- a/src/msvc/Hacl_Bignum32.c
+++ b/src/msvc/Hacl_Bignum32.c
@@ -105,9 +105,9 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum32_mul(uint32_t len, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a, b, tmp, res);
 }
 
@@ -119,9 +119,9 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum32_sqr(uint32_t len, uint32_t *a, uint32_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(len, a, tmp, res);
 }
 
@@ -142,28 +142,28 @@ bn_slow_precomp(
   uint32_t *a1 = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(a1, 0U, (len + len) * sizeof (uint32_t));
   memcpy(a1, a, (len + len) * sizeof (uint32_t));
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint32_t qj = mu * a1[i0];
     uint32_t *res_j0 = a1 + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint32_t a_i = n[i];
       uint32_t *res_i = res_j0 + i;
@@ -181,9 +181,9 @@ bn_slow_precomp(
   uint32_t *tmp0 = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(tmp0, 0U, len * sizeof (uint32_t));
   uint32_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u32(len, a_mod, n, tmp0);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t *os = a_mod;
     uint32_t x = (m & tmp0[i]) | (~m & a_mod[i]);
@@ -192,9 +192,9 @@ bn_slow_precomp(
   KRML_CHECK_SIZE(sizeof (uint32_t), len + len);
   uint32_t *c = (uint32_t *)alloca((len + len) * sizeof (uint32_t));
   memset(c, 0U, (len + len) * sizeof (uint32_t));
-  KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)4U * len);
-  uint32_t *tmp = (uint32_t *)alloca((uint32_t)4U * len * sizeof (uint32_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), 4U * len);
+  uint32_t *tmp = (uint32_t *)alloca(4U * len * sizeof (uint32_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint32_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(len, a_mod, r2, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u32(len, n, mu, c, res);
 }
@@ -216,20 +216,20 @@ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res)
   uint32_t *one = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc;
   uint32_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t *r2 = (uint32_t *)alloca(len * sizeof (uint32_t));
@@ -242,7 +242,7 @@ bool Hacl_Bignum32_mod(uint32_t len, uint32_t *n, uint32_t *a, uint32_t *res)
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -276,8 +276,8 @@ Hacl_Bignum32_mod_exp_vartime(
 )
 {
   uint32_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len, nBits, n, a, bBits, b, res);
   }
@@ -285,7 +285,7 @@ Hacl_Bignum32_mod_exp_vartime(
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -319,8 +319,8 @@ Hacl_Bignum32_mod_exp_consttime(
 )
 {
   uint32_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u32(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u32(len, nBits, n, a, bBits, b, res);
   }
@@ -328,7 +328,7 @@ Hacl_Bignum32_mod_exp_consttime(
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -353,23 +353,23 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a,
   uint32_t *one = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
   memset(one, 0U, len * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc0;
   uint32_t m00 = m0 & m1;
   KRML_CHECK_SIZE(sizeof (uint32_t), len);
   uint32_t *bn_zero = (uint32_t *)alloca(len * sizeof (uint32_t));
   memset(bn_zero, 0U, len * sizeof (uint32_t));
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -377,53 +377,48 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a,
   uint32_t mask1 = mask;
   uint32_t res10 = mask1;
   uint32_t m10 = res10;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len);
     uint32_t *n2 = (uint32_t *)alloca(len * sizeof (uint32_t));
     memset(n2, 0U, len * sizeof (uint32_t));
-    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2);
+    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2);
     uint32_t c1;
-    if ((uint32_t)1U < len)
+    if (1U < len)
     {
-      uint32_t *a1 = n + (uint32_t)1U;
-      uint32_t *res1 = n2 + (uint32_t)1U;
+      uint32_t *a1 = n + 1U;
+      uint32_t *res1 = n2 + 1U;
       uint32_t c = c0;
-      for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U; i++)
+      for (uint32_t i = 0U; i < (len - 1U) / 4U; i++)
       {
-        uint32_t t1 = a1[(uint32_t)4U * i];
-        uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-        uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-        uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-        uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-        uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-        uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-        uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+        uint32_t t1 = a1[4U * i];
+        uint32_t *res_i0 = res1 + 4U * i;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+        uint32_t t10 = a1[4U * i + 1U];
+        uint32_t *res_i1 = res1 + 4U * i + 1U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+        uint32_t t11 = a1[4U * i + 2U];
+        uint32_t *res_i2 = res1 + 4U * i + 2U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+        uint32_t t12 = a1[4U * i + 3U];
+        uint32_t *res_i = res1 + 4U * i + 3U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
       }
-      for
-      (uint32_t
-        i = (len - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-        i
-        < len - (uint32_t)1U;
-        i++)
+      for (uint32_t i = (len - 1U) / 4U * 4U; i < len - 1U; i++)
       {
         uint32_t t1 = a1[i];
         uint32_t *res_i = res1 + i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i);
       }
       uint32_t c10 = c;
       c1 = c10;
@@ -432,20 +427,14 @@ bool Hacl_Bignum32_mod_inv_prime_vartime(uint32_t len, uint32_t *n, uint32_t *a,
     {
       c1 = c0;
     }
-    KRML_HOST_IGNORE(c1);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len,
-      nBits,
-      n,
-      a,
-      (uint32_t)32U * len,
-      n2,
-      res);
+    KRML_MAYBE_UNUSED_VAR(c1);
+    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u32(len, nBits, n, a, 32U * len, n2, res);
   }
   else
   {
     memset(res, 0U, len * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 
@@ -477,7 +466,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -632,38 +621,33 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t *n2 = (uint32_t *)alloca(len1 * sizeof (uint32_t));
   memset(n2, 0U, len1 * sizeof (uint32_t));
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
   uint32_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint32_t *a1 = k1.n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t *a1 = k1.n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint32_t t1 = a1[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i);
     }
     uint32_t c10 = c;
     c1 = c10;
@@ -672,13 +656,13 @@ Hacl_Bignum32_mod_inv_prime_vartime_precomp(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u32(len1,
     k1.n,
     k1.mu,
     k1.r2,
     a,
-    (uint32_t)32U * len1,
+    32U * len1,
     n2,
     res);
 }
@@ -702,36 +686,28 @@ Load a big-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum32_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint32_t *os = res2;
-    uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U);
+    uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U);
     uint32_t x = u;
     os[i] = x;
   }
@@ -751,36 +727,28 @@ Load a little-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum32_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++)
   {
     uint32_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)4U;
+    uint8_t *bj = tmp + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r1 = u;
     uint32_t x = r1;
@@ -797,14 +765,14 @@ Serialize a bignum into big-endian memory.
 */
 void Hacl_Bignum32_bn_to_bytes_be(uint32_t len, uint32_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store32_be(tmp + i * (uint32_t)4U, b[bnLen - i - (uint32_t)1U]);
+    store32_be(tmp + i * 4U, b[bnLen - i - 1U]);
   }
   memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t));
 }
@@ -817,14 +785,14 @@ Serialize a bignum into little-endian memory.
 */
 void Hacl_Bignum32_bn_to_bytes_le(uint32_t len, uint32_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store32_le(tmp + i * (uint32_t)4U, b[i]);
+    store32_le(tmp + i * 4U, b[i]);
   }
   memcpy(res, tmp, len * sizeof (uint8_t));
 }
@@ -842,12 +810,12 @@ Returns 2^32 - 1 if a < b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum32_lt_mask(uint32_t len, uint32_t *a, uint32_t *b)
 {
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   return acc;
 }
@@ -859,8 +827,8 @@ Returns 2^32 - 1 if a = b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum32_eq_mask(uint32_t len, uint32_t *a, uint32_t *b)
 {
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
diff --git a/src/msvc/Hacl_Bignum4096.c b/src/msvc/Hacl_Bignum4096.c
index ee51cc5e..fc4aefc0 100644
--- a/src/msvc/Hacl_Bignum4096.c
+++ b/src/msvc/Hacl_Bignum4096.c
@@ -63,26 +63,26 @@ Write `a + b mod 2^4096` in `res`.
 */
 uint64_t Hacl_Bignum4096_add(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i););
   return c;
 }
@@ -96,26 +96,26 @@ Write `a - b mod 2^4096` in `res`.
 */
 uint64_t Hacl_Bignum4096_sub(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i););
   return c;
 }
@@ -132,53 +132,53 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum4096_add_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i););
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i););
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -198,54 +198,54 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum4096_sub_mod(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = a[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = a[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i););
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i););
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -262,7 +262,7 @@ Write `a * b` in `res`.
 void Hacl_Bignum4096_mul(uint64_t *a, uint64_t *b, uint64_t *res)
 {
   uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64((uint32_t)64U, a, b, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(64U, a, b, tmp, res);
 }
 
 /**
@@ -274,16 +274,16 @@ Write `a * a` in `res`.
 void Hacl_Bignum4096_sqr(uint64_t *a, uint64_t *res)
 {
   uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64((uint32_t)64U, a, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(64U, a, tmp, res);
 }
 
 static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
-  uint32_t i = nBits / (uint32_t)64U;
-  uint32_t j = nBits % (uint32_t)64U;
-  res[i] = res[i] | (uint64_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)8192U - nBits; i0++)
+  memset(res, 0U, 64U * sizeof (uint64_t));
+  uint32_t i = nBits / 64U;
+  uint32_t j = nBits % 64U;
+  res[i] = res[i] | 1ULL << j;
+  for (uint32_t i0 = 0U; i0 < 8192U - nBits; i0++)
   {
     Hacl_Bignum4096_add_mod(n, res, res, res);
   }
@@ -291,61 +291,61 @@ static inline void precompr2(uint32_t nBits, uint64_t *n, uint64_t *res)
 
 static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      16U,
+      1U,
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i););
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)64U + i0;
-    uint64_t res_j = c[(uint32_t)64U + i0];
+    uint64_t *resb = c + 64U + i0;
+    uint64_t res_j = c[64U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(res, c + 64U, 64U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
-  uint64_t c1 = (uint64_t)0U;
+  uint64_t c1 = 0ULL;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t t1 = res[(uint32_t)4U * i];
-    uint64_t t20 = n[(uint32_t)4U * i];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * i;
+    0U,
+    16U,
+    1U,
+    uint64_t t1 = res[4U * i];
+    uint64_t t20 = n[4U * i];
+    uint64_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = res[4U * i + 1U];
+    uint64_t t21 = n[4U * i + 1U];
+    uint64_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = res[4U * i + 2U];
+    uint64_t t22 = n[4U * i + 2U];
+    uint64_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = res[4U * i + 3U];
+    uint64_t t2 = n[4U * i + 3U];
+    uint64_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c1, t12, t2, res_i););
   uint64_t c10 = c1;
   uint64_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -356,47 +356,47 @@ static inline void reduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *
 static inline void from(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *a)
 {
   uint64_t tmp[128U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(tmp, aM, 64U * sizeof (uint64_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint64_t *n, uint64_t nInv, uint64_t *c, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
     uint64_t qj = nInv * c[i0];
     uint64_t *res_j0 = c + i0;
-    uint64_t c1 = (uint64_t)0U;
+    uint64_t c1 = 0ULL;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      16U,
+      1U,
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c1, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c1, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c1, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c1, res_i););
     uint64_t r = c1;
     uint64_t c10 = r;
-    uint64_t *resb = c + (uint32_t)64U + i0;
-    uint64_t res_j = c[(uint32_t)64U + i0];
+    uint64_t *resb = c + 64U + i0;
+    uint64_t res_j = c[64U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(res, c + 64U, 64U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
   uint64_t c1 = Hacl_Bignum4096_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = res;
     uint64_t x = (m & tmp[i]) | (~m & res[i]);
@@ -409,7 +409,7 @@ amont_mul(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint64_t *bM, uint64_t *
 {
   uint64_t c[128U] = { 0U };
   uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64((uint32_t)64U, aM, bM, tmp, c);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(64U, aM, bM, tmp, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -417,7 +417,7 @@ static inline void amont_sqr(uint64_t *n, uint64_t nInv_u64, uint64_t *aM, uint6
 {
   uint64_t c[128U] = { 0U };
   uint64_t tmp[256U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64((uint32_t)64U, aM, tmp, c);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(64U, aM, tmp, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -426,42 +426,42 @@ bn_slow_precomp(uint64_t *n, uint64_t mu, uint64_t *r2, uint64_t *a, uint64_t *r
 {
   uint64_t a_mod[64U] = { 0U };
   uint64_t a1[128U] = { 0U };
-  memcpy(a1, a, (uint32_t)128U * sizeof (uint64_t));
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  memcpy(a1, a, 128U * sizeof (uint64_t));
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
     uint64_t qj = mu * a1[i0];
     uint64_t *res_j0 = a1 + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      0U,
+      16U,
+      1U,
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i););
     uint64_t r = c;
     uint64_t c1 = r;
-    uint64_t *resb = a1 + (uint32_t)64U + i0;
-    uint64_t res_j = a1[(uint32_t)64U + i0];
+    uint64_t *resb = a1 + 64U + i0;
+    uint64_t res_j = a1[64U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb);
   }
-  memcpy(a_mod, a1 + (uint32_t)64U, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(a_mod, a1 + 64U, 64U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[64U] = { 0U };
   uint64_t c1 = Hacl_Bignum4096_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t *os = a_mod;
     uint64_t x = (m & tmp[i]) | (~m & a_mod[i]);
@@ -486,22 +486,21 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum4096_mod(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[64U] = { 0U };
-  memset(one, 0U, (uint32_t)64U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memset(one, 0U, 64U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   uint64_t is_valid_m = m0 & m1;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t r2[64U] = { 0U };
     precompr2(nBits, n, r2);
@@ -510,65 +509,65 @@ bool Hacl_Bignum4096_mod(uint64_t *n, uint64_t *a, uint64_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static uint64_t exp_check(uint64_t *n, uint64_t *a, uint32_t bBits, uint64_t *b)
 {
   uint64_t one[64U] = { 0U };
-  memset(one, 0U, (uint32_t)64U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memset(one, 0U, 64U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m10 = acc0;
   uint64_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t m1;
-  if (bBits < (uint32_t)64U * bLen)
+  if (bBits < 64U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
     uint64_t *b2 = (uint64_t *)alloca(bLen * sizeof (uint64_t));
     memset(b2, 0U, bLen * sizeof (uint64_t));
-    uint32_t i0 = bBits / (uint32_t)64U;
-    uint32_t j = bBits % (uint32_t)64U;
-    b2[i0] = b2[i0] | (uint64_t)1U << j;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 64U;
+    uint32_t j = bBits % 64U;
+    b2[i0] = b2[i0] | 1ULL << j;
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    m1 = 0xFFFFFFFFFFFFFFFFULL;
   }
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t m = m1 & m2;
@@ -586,7 +585,7 @@ exp_vartime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[64U] = { 0U };
     uint64_t c[128U] = { 0U };
@@ -594,18 +593,18 @@ exp_vartime_precomp(
     reduction(n, mu, c, aM);
     uint64_t resM[64U] = { 0U };
     uint64_t ctx[128U] = { 0U };
-    memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
+    memcpy(ctx, n, 64U * sizeof (uint64_t));
+    memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -614,7 +613,7 @@ exp_vartime_precomp(
       amont_sqr(ctx_n0, mu, aM, aM);
     }
     uint64_t tmp[128U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)64U * sizeof (uint64_t));
+    memcpy(tmp, resM, 64U * sizeof (uint64_t));
     reduction(n, mu, tmp, res);
     return;
   }
@@ -624,74 +623,70 @@ exp_vartime_precomp(
   reduction(n, mu, c, aM);
   uint64_t resM[64U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[128U] = { 0U };
-  memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(ctx, n, 64U * sizeof (uint64_t));
+  memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
   uint64_t table[1024U] = { 0U };
   uint64_t tmp[64U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)64U;
+  uint64_t *t1 = table + 64U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)64U;
+  uint64_t *ctx_r20 = ctx + 64U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(t1, aM, 64U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)64U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 64U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U;
+    memcpy(table + (2U * i + 2U) * 64U, tmp, 64U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 64U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 64U, tmp, 64U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
     uint32_t bits_l32 = (uint32_t)bits_c;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)64U;
-    memcpy(resM, (uint64_t *)a_bits_l, (uint32_t)64U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 64U;
+    memcpy(resM, (uint64_t *)a_bits_l, 64U * sizeof (uint64_t));
   }
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[64U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
     uint32_t bits_l32 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l = table + bits_l32 * (uint32_t)64U;
-    memcpy(tmp0, (uint64_t *)a_bits_l, (uint32_t)64U * sizeof (uint64_t));
+    const uint64_t *a_bits_l = table + bits_l32 * 64U;
+    memcpy(tmp0, (uint64_t *)a_bits_l, 64U * sizeof (uint64_t));
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint64_t tmp1[128U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(tmp1, resM, 64U * sizeof (uint64_t));
   reduction(n, mu, tmp1, res);
 }
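
The hunks above reformat exp_vartime_precomp, which processes the exponent in 4-bit windows against a 16-entry table of Montgomery powers: four squarings per window, then one multiply by the selected table entry. As a plain-integer illustration of that access pattern (not the Montgomery-form library code; the helper names and test values below are invented for the sketch, and the partial leading window handled by the `bBits % 4U != 0U` branch is omitted), the same windowed loop on a single word looks like this:

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch only: single-word 4-bit fixed-window exponentiation,
   mirroring the table/square/multiply structure of exp_vartime_precomp.
   Operands are kept below 2^32 so that a * b never overflows uint64_t. */
static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t m)
{
  return (a * b) % m;
}

static uint64_t pow_fixed_window(uint64_t a, uint64_t e, uint64_t m)
{
  /* table[i] = a^i mod m for i = 0..15, like the 16-entry `table`
     filled with amont_sqr/amont_mul in the real code. */
  uint64_t table[16];
  table[0] = 1U % m;
  for (int i = 1; i < 16; i++)
  {
    table[i] = mulmod(table[i - 1], a, m);
  }
  uint64_t acc = 1U % m;
  /* Consume the exponent four bits at a time, most significant window
     first: four squarings, then one multiply by the selected entry. */
  for (int shift = 60; shift >= 0; shift -= 4)
  {
    for (int s = 0; s < 4; s++)
    {
      acc = mulmod(acc, acc, m);
    }
    acc = mulmod(acc, table[(e >> shift) & 0xFU], m);
  }
  return acc;
}

int main(void)
{
  /* 5^117 mod 1000003; small enough to check against a naive loop. */
  printf("%llu\n", (unsigned long long)pow_fixed_window(5U, 117U, 1000003U));
  return 0;
}

The real code keeps everything in Montgomery form, doing the squarings and multiplications with amont_sqr/amont_mul and only leaving that form in the final reduction call.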
 
@@ -706,7 +701,7 @@ exp_consttime_precomp(
   uint64_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint64_t aM[64U] = { 0U };
     uint64_t c[128U] = { 0U };
@@ -714,22 +709,22 @@ exp_consttime_precomp(
     reduction(n, mu, c, aM);
     uint64_t resM[64U] = { 0U };
     uint64_t ctx[128U] = { 0U };
-    memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-    memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    memcpy(ctx, n, 64U * sizeof (uint64_t));
+    memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+      for (uint32_t i = 0U; i < 64U; i++)
       {
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -740,14 +735,14 @@ exp_consttime_precomp(
       sw = bit;
     }
     uint64_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+    for (uint32_t i = 0U; i < 64U; i++)
     {
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
     uint64_t tmp[128U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)64U * sizeof (uint64_t));
+    memcpy(tmp, resM, 64U * sizeof (uint64_t));
     reduction(n, mu, tmp, res);
     return;
   }
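
The bBits < 200U branch above is a bit-by-bit ladder whose conditional swap is the XOR-with-mask trick, dummy = (0ULL - sw1) & (resM[i] ^ aM[i]), so the decision to swap never appears as a branch or as a data-dependent memory access. A standalone sketch of that swap (function and variable names chosen here for illustration):

#include <stdint.h>
#include <stdio.h>

/* Branchless conditional swap: when `bit` is 1 the limbs are exchanged,
   when it is 0 nothing changes, and the same instructions execute either
   way. Standalone illustration of the pattern used in exp_consttime_precomp. */
static void cswap_u64(uint64_t bit, uint64_t *x, uint64_t *y, uint32_t len)
{
  uint64_t mask = 0ULL - bit; /* all ones if bit == 1, all zeros if bit == 0 */
  for (uint32_t i = 0U; i < len; i++)
  {
    uint64_t dummy = mask & (x[i] ^ y[i]);
    x[i] = x[i] ^ dummy;
    y[i] = y[i] ^ dummy;
  }
}

int main(void)
{
  uint64_t a[2U] = { 1ULL, 2ULL };
  uint64_t b[2U] = { 3ULL, 4ULL };
  cswap_u64(1ULL, a, b, 2U); /* exchanges a and b limb by limb */
  cswap_u64(0ULL, a, b, 2U); /* leaves both buffers untouched */
  printf("%llu %llu\n", (unsigned long long)a[0U], (unsigned long long)b[0U]);
  return 0;
}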
@@ -757,53 +752,49 @@ exp_consttime_precomp(
   reduction(n, mu, c0, aM);
   uint64_t resM[64U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 64U + 1U;
   }
   uint64_t ctx[128U] = { 0U };
-  memcpy(ctx, n, (uint32_t)64U * sizeof (uint64_t));
-  memcpy(ctx + (uint32_t)64U, r2, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(ctx, n, 64U * sizeof (uint64_t));
+  memcpy(ctx + 64U, r2, 64U * sizeof (uint64_t));
   uint64_t table[1024U] = { 0U };
   uint64_t tmp[64U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)64U;
+  uint64_t *t1 = table + 64U;
   uint64_t *ctx_n0 = ctx;
-  uint64_t *ctx_r20 = ctx + (uint32_t)64U;
+  uint64_t *ctx_r20 = ctx + 64U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(t1, aM, 64U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)64U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 64U;
     uint64_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)64U;
+    memcpy(table + (2U * i + 2U) * 64U, tmp, 64U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 64U;
     uint64_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)64U,
-      tmp,
-      (uint32_t)64U * sizeof (uint64_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 64U, tmp, 64U * sizeof (uint64_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint64_t *)table, (uint32_t)64U * sizeof (uint64_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+    memcpy(resM, (uint64_t *)table, 64U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)64U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 64U;
+      for (uint32_t i = 0U; i < 64U; i++)
       {
         uint64_t *os = resM;
         uint64_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -813,28 +804,28 @@ exp_consttime_precomp(
   else
   {
     uint64_t *ctx_n = ctx;
-    uint64_t *ctx_r2 = ctx + (uint32_t)64U;
+    uint64_t *ctx_r2 = ctx + 64U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint64_t tmp0[64U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint64_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)64U * sizeof (uint64_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 64U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)64U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 64U;
+      for (uint32_t i = 0U; i < 64U; i++)
       {
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -844,7 +835,7 @@ exp_consttime_precomp(
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint64_t tmp1[128U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)64U * sizeof (uint64_t));
+  memcpy(tmp1, resM, 64U * sizeof (uint64_t));
   reduction(n, mu, tmp1, res);
 }
 
@@ -910,17 +901,16 @@ Hacl_Bignum4096_mod_exp_vartime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -953,17 +943,16 @@ Hacl_Bignum4096_mod_exp_consttime(
 )
 {
   uint64_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -984,22 +973,22 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *res)
 {
   uint64_t one[64U] = { 0U };
-  memset(one, 0U, (uint32_t)64U * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memset(one, 0U, 64U * sizeof (uint64_t));
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc0;
   uint64_t m00 = m0 & m1;
   uint64_t bn_zero[64U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -1007,57 +996,56 @@ bool Hacl_Bignum4096_mod_inv_prime_vartime(uint64_t *n, uint64_t *a, uint64_t *r
   uint64_t mask1 = mask;
   uint64_t res10 = mask1;
   uint64_t m10 = res10;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     uint64_t n2[64U] = { 0U };
-    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2);
-    uint64_t *a1 = n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2);
+    uint64_t *a1 = n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
     KRML_MAYBE_FOR15(i,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i););
+      0U,
+      15U,
+      1U,
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i););
     KRML_MAYBE_FOR3(i,
-      (uint32_t)60U,
-      (uint32_t)63U,
-      (uint32_t)1U,
+      60U,
+      63U,
+      1U,
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
     uint64_t c1 = c;
     uint64_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)4096U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 4096U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)64U * sizeof (uint64_t));
+    memset(res, 0U, 64U * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 
@@ -1081,17 +1069,15 @@ Heap-allocate and initialize a Montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *Hacl_Bignum4096_mont_ctx_init(uint64_t *n)
 {
-  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint64_t));
-  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint64_t));
+  uint64_t *r2 = (uint64_t *)KRML_HOST_CALLOC(64U, sizeof (uint64_t));
+  uint64_t *n1 = (uint64_t *)KRML_HOST_CALLOC(64U, sizeof (uint64_t));
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)64U * sizeof (uint64_t));
-  uint32_t
-  nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64((uint32_t)64U, n);
+  memcpy(n11, n, 64U * sizeof (uint64_t));
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(64U, n);
   precompr2(nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
-  res = { .len = (uint32_t)64U, .n = n11, .mu = mu, .r2 = r21 };
+  Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = 64U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 *)KRML_HOST_MALLOC(sizeof (
@@ -1219,37 +1205,37 @@ Hacl_Bignum4096_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 k1 = *k;
   uint64_t n2[64U] = { 0U };
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
-  uint64_t *a1 = k1.n + (uint32_t)1U;
-  uint64_t *res1 = n2 + (uint32_t)1U;
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
+  uint64_t *a1 = k1.n + 1U;
+  uint64_t *res1 = n2 + 1U;
   uint64_t c = c0;
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t t1 = a1[(uint32_t)4U * i];
-    uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-    uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-    uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-    uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i););
+    0U,
+    15U,
+    1U,
+    uint64_t t1 = a1[4U * i];
+    uint64_t *res_i0 = res1 + 4U * i;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+    uint64_t t10 = a1[4U * i + 1U];
+    uint64_t *res_i1 = res1 + 4U * i + 1U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+    uint64_t t11 = a1[4U * i + 2U];
+    uint64_t *res_i2 = res1 + 4U * i + 2U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+    uint64_t t12 = a1[4U * i + 3U];
+    uint64_t *res_i = res1 + 4U * i + 3U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i););
   KRML_MAYBE_FOR3(i,
-    (uint32_t)60U,
-    (uint32_t)63U,
-    (uint32_t)1U,
+    60U,
+    63U,
+    1U,
     uint64_t t1 = a1[i];
     uint64_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i););
   uint64_t c1 = c;
   uint64_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)4096U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 4096U, n2, res);
 }
 
 
@@ -1271,36 +1257,28 @@ Load a big-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum4096_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res2;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -1320,36 +1298,28 @@ Load a little-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum4096_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++)
   {
     uint64_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)8U;
+    uint8_t *bj = tmp + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r1 = u;
     uint64_t x = r1;
@@ -1367,10 +1337,10 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum4096_bn_to_bytes_be(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 64U; i++)
   {
-    store64_be(res + i * (uint32_t)8U, b[(uint32_t)64U - i - (uint32_t)1U]);
+    store64_be(res + i * 8U, b[64U - i - 1U]);
   }
 }
 
@@ -1383,10 +1353,10 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum4096_bn_to_bytes_le(uint64_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 64U; i++)
   {
-    store64_le(res + i * (uint32_t)8U, b[i]);
+    store64_le(res + i * 8U, b[i]);
   }
 }
 
@@ -1403,12 +1373,12 @@ Returns 2^64 - 1 if a < b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum4096_lt_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   return acc;
 }
@@ -1420,8 +1390,8 @@ Returns 2^64 - 1 if a = b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum4096_eq_mask(uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
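
Hacl_Bignum4096_lt_mask and Hacl_Bignum4096_eq_mask above return an all-ones or all-zeros word rather than a boolean, which lets callers keep combining results with & and | instead of branching (see the is_valid_m == 0xFFFFFFFFFFFFFFFFULL checks earlier in this file). A word-level sketch of such an equality mask, in the spirit of FStar_UInt64_eq_mask but not the krmllib definition itself:

#include <stdint.h>
#include <stdio.h>

/* Constant-time word equality: returns 0xFFFFFFFFFFFFFFFF when a == b
   and 0 otherwise, without any data-dependent branch. Sketch only. */
static uint64_t eq_mask_u64(uint64_t a, uint64_t b)
{
  uint64_t x = a ^ b;                  /* zero iff a == b */
  uint64_t minus_x = ~x + 1ULL;        /* two's complement negation */
  uint64_t x_or_minus_x = x | minus_x; /* top bit set iff x != 0 */
  uint64_t xnx = x_or_minus_x >> 63U;  /* 1 if a != b, 0 if a == b */
  return xnx - 1ULL;                   /* 0 - 1 wraps to all ones */
}

int main(void)
{
  uint64_t m = eq_mask_u64(42ULL, 42ULL);
  /* Select between two values with the mask instead of branching,
     as in the (c & res_j[i]) | (~c & resM[i]) lines above. */
  uint64_t picked = (m & 7ULL) | (~m & 9ULL);
  printf("mask=%016llx picked=%llu\n",
    (unsigned long long)m,
    (unsigned long long)picked);
  return 0;
}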
diff --git a/src/msvc/Hacl_Bignum4096_32.c b/src/msvc/Hacl_Bignum4096_32.c
index 790d0428..c35b7697 100644
--- a/src/msvc/Hacl_Bignum4096_32.c
+++ b/src/msvc/Hacl_Bignum4096_32.c
@@ -64,24 +64,24 @@ Write `a + b mod 2^4096` in `res`.
 */
 uint32_t Hacl_Bignum4096_32_add(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
   return c;
@@ -96,24 +96,24 @@ Write `a - b mod 2^4096` in `res`.
 */
 uint32_t Hacl_Bignum4096_32_sub(uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
   return c;
@@ -131,51 +131,51 @@ Write `(a + b) mod n` in `res`.
 */
 void Hacl_Bignum4096_32_add_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, t12, t2, res_i);
   }
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, t2, res_i);
   }
   uint32_t c1 = c;
   uint32_t c2 = c00 - c1;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -195,52 +195,52 @@ Write `(a - b) mod n` in `res`.
 */
 void Hacl_Bignum4096_32_sub_mod(uint32_t *n, uint32_t *a, uint32_t *b, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c0 = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = a[(uint32_t)4U * i];
-    uint32_t t20 = b[(uint32_t)4U * i];
-    uint32_t *res_i0 = res + (uint32_t)4U * i;
+    uint32_t t1 = a[4U * i];
+    uint32_t t20 = b[4U * i];
+    uint32_t *res_i0 = res + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t1, t20, res_i0);
-    uint32_t t10 = a[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = a[4U * i + 1U];
+    uint32_t t21 = b[4U * i + 1U];
+    uint32_t *res_i1 = res + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t10, t21, res_i1);
-    uint32_t t11 = a[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = a[4U * i + 2U];
+    uint32_t t22 = b[4U * i + 2U];
+    uint32_t *res_i2 = res + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t11, t22, res_i2);
-    uint32_t t12 = a[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = a[4U * i + 3U];
+    uint32_t t2 = b[4U * i + 3U];
+    uint32_t *res_i = res + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c0, t12, t2, res_i);
   }
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
-  uint32_t c = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u32(c, t12, t2, res_i);
   }
   uint32_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint32_t c2 = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t c2 = 0U - c00;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & tmp[i]) | (~c2 & res[i]);
@@ -257,7 +257,7 @@ Write `a * b` in `res`.
 void Hacl_Bignum4096_32_mul(uint32_t *a, uint32_t *b, uint32_t *res)
 {
   uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32((uint32_t)128U, a, b, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(128U, a, b, tmp, res);
 }
 
 /**
@@ -269,16 +269,16 @@ Write `a * a` in `res`.
 void Hacl_Bignum4096_32_sqr(uint32_t *a, uint32_t *res)
 {
   uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32((uint32_t)128U, a, tmp, res);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(128U, a, tmp, res);
 }
 
 static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 {
-  memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
-  uint32_t i = nBits / (uint32_t)32U;
-  uint32_t j = nBits % (uint32_t)32U;
-  res[i] = res[i] | (uint32_t)1U << j;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)8192U - nBits; i0++)
+  memset(res, 0U, 128U * sizeof (uint32_t));
+  uint32_t i = nBits / 32U;
+  uint32_t j = nBits % 32U;
+  res[i] = res[i] | 1U << j;
+  for (uint32_t i0 = 0U; i0 < 8192U - nBits; i0++)
   {
     Hacl_Bignum4096_32_add_mod(n, res, res, res);
   }
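
precompr2 above sets the single bit 2^nBits in res and then performs 8192 - nBits modular doublings through Hacl_Bignum4096_32_add_mod, leaving R^2 mod n for R = 2^4096 in res. A one-word analogue of the same doubling idea, kept deliberately naive (it starts from 1 and always does the full 64 doublings; the function name and sample modulus are invented for the sketch):

#include <stdint.h>
#include <stdio.h>

/* Toy analogue of precompr2 for a single 32-bit word: compute
   R^2 mod n = 2^64 mod n for R = 2^32 by 64 modular doublings.
   Requires an odd modulus n > 1. Sketch only. */
static uint32_t precomp_r2_word(uint32_t n)
{
  uint64_t r = 1U % n;
  for (int i = 0; i < 64; i++)
  {
    r = (2U * r) % n; /* one "add_mod(n, r, r, r)" step */
  }
  return (uint32_t)r;
}

int main(void)
{
  /* 2^64 mod 1000003 */
  printf("%u\n", (unsigned)precomp_r2_word(1000003U));
  return 0;
}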
@@ -286,59 +286,59 @@ static inline void precompr2(uint32_t nBits, uint32_t *n, uint32_t *res)
 
 static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < 128U; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)128U + i0;
-    uint32_t res_j = c[(uint32_t)128U + i0];
+    uint32_t *resb = c + 128U + i0;
+    uint32_t res_j = c[128U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(res, c + 128U, 128U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
-  uint32_t c1 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint32_t c1 = 0U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
-    uint32_t t1 = res[(uint32_t)4U * i];
-    uint32_t t20 = n[(uint32_t)4U * i];
-    uint32_t *res_i0 = tmp + (uint32_t)4U * i;
+    uint32_t t1 = res[4U * i];
+    uint32_t t20 = n[4U * i];
+    uint32_t *res_i0 = tmp + 4U * i;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t1, t20, res_i0);
-    uint32_t t10 = res[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t t21 = n[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = tmp + (uint32_t)4U * i + (uint32_t)1U;
+    uint32_t t10 = res[4U * i + 1U];
+    uint32_t t21 = n[4U * i + 1U];
+    uint32_t *res_i1 = tmp + 4U * i + 1U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t10, t21, res_i1);
-    uint32_t t11 = res[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t t22 = n[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = tmp + (uint32_t)4U * i + (uint32_t)2U;
+    uint32_t t11 = res[4U * i + 2U];
+    uint32_t t22 = n[4U * i + 2U];
+    uint32_t *res_i2 = tmp + 4U * i + 2U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t11, t22, res_i2);
-    uint32_t t12 = res[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t t2 = n[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = tmp + (uint32_t)4U * i + (uint32_t)3U;
+    uint32_t t12 = res[4U * i + 3U];
+    uint32_t t2 = n[4U * i + 3U];
+    uint32_t *res_i = tmp + 4U * i + 3U;
     c1 = Lib_IntTypes_Intrinsics_sub_borrow_u32(c1, t12, t2, res_i);
   }
   uint32_t c10 = c1;
   uint32_t c2 = c00 - c10;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (c2 & res[i]) | (~c2 & tmp[i]);
@@ -349,46 +349,46 @@ static inline void reduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *
 static inline void from(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *a)
 {
   uint32_t tmp[256U] = { 0U };
-  memcpy(tmp, aM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(tmp, aM, 128U * sizeof (uint32_t));
   reduction(n, nInv_u64, tmp, a);
 }
 
 static inline void areduction(uint32_t *n, uint32_t nInv, uint32_t *c, uint32_t *res)
 {
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++)
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < 128U; i0++)
   {
     uint32_t qj = nInv * c[i0];
     uint32_t *res_j0 = c + i0;
-    uint32_t c1 = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint32_t c1 = 0U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c1, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c1, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c1, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c1 = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c1, res_i);
     }
     uint32_t r = c1;
     uint32_t c10 = r;
-    uint32_t *resb = c + (uint32_t)128U + i0;
-    uint32_t res_j = c[(uint32_t)128U + i0];
+    uint32_t *resb = c + 128U + i0;
+    uint32_t res_j = c[128U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c10, res_j, resb);
   }
-  memcpy(res, c + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(res, c + 128U, 128U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
   uint32_t c1 = Hacl_Bignum4096_32_sub(res, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = res;
     uint32_t x = (m & tmp[i]) | (~m & res[i]);
@@ -401,7 +401,7 @@ amont_mul(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint32_t *bM, uint32_t *
 {
   uint32_t c[256U] = { 0U };
   uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32((uint32_t)128U, aM, bM, tmp, c);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint32(128U, aM, bM, tmp, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -409,7 +409,7 @@ static inline void amont_sqr(uint32_t *n, uint32_t nInv_u64, uint32_t *aM, uint3
 {
   uint32_t c[256U] = { 0U };
   uint32_t tmp[512U] = { 0U };
-  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32((uint32_t)128U, aM, tmp, c);
+  Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint32(128U, aM, tmp, c);
   areduction(n, nInv_u64, c, resM);
 }
 
@@ -418,41 +418,41 @@ bn_slow_precomp(uint32_t *n, uint32_t mu, uint32_t *r2, uint32_t *a, uint32_t *r
 {
   uint32_t a_mod[128U] = { 0U };
   uint32_t a1[256U] = { 0U };
-  memcpy(a1, a, (uint32_t)256U * sizeof (uint32_t));
-  uint32_t c0 = (uint32_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)128U; i0++)
+  memcpy(a1, a, 256U * sizeof (uint32_t));
+  uint32_t c0 = 0U;
+  for (uint32_t i0 = 0U; i0 < 128U; i0++)
   {
     uint32_t qj = mu * a1[i0];
     uint32_t *res_j0 = a1 + i0;
-    uint32_t c = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint32_t c = 0U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
-      uint32_t a_i = n[(uint32_t)4U * i];
-      uint32_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint32_t a_i = n[4U * i];
+      uint32_t *res_i0 = res_j0 + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i, qj, c, res_i0);
-      uint32_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint32_t a_i0 = n[4U * i + 1U];
+      uint32_t *res_i1 = res_j0 + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i0, qj, c, res_i1);
-      uint32_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint32_t a_i1 = n[4U * i + 2U];
+      uint32_t *res_i2 = res_j0 + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i1, qj, c, res_i2);
-      uint32_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint32_t a_i2 = n[4U * i + 3U];
+      uint32_t *res_i = res_j0 + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u32(a_i2, qj, c, res_i);
     }
     uint32_t r = c;
     uint32_t c1 = r;
-    uint32_t *resb = a1 + (uint32_t)128U + i0;
-    uint32_t res_j = a1[(uint32_t)128U + i0];
+    uint32_t *resb = a1 + 128U + i0;
+    uint32_t res_j = a1[128U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u32(c0, c1, res_j, resb);
   }
-  memcpy(a_mod, a1 + (uint32_t)128U, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(a_mod, a1 + 128U, 128U * sizeof (uint32_t));
   uint32_t c00 = c0;
   uint32_t tmp[128U] = { 0U };
   uint32_t c1 = Hacl_Bignum4096_32_sub(a_mod, n, tmp);
-  KRML_HOST_IGNORE(c1);
-  uint32_t m = (uint32_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint32_t m = 0U - c00;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t *os = a_mod;
     uint32_t x = (m & tmp[i]) | (~m & a_mod[i]);
@@ -477,21 +477,21 @@ Write `a mod n` in `res`.
 bool Hacl_Bignum4096_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[128U] = { 0U };
-  memset(one, 0U, (uint32_t)128U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  memset(one, 0U, 128U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc;
   uint32_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t r2[128U] = { 0U };
     precompr2(nBits, n, r2);
@@ -500,65 +500,65 @@ bool Hacl_Bignum4096_32_mod(uint32_t *n, uint32_t *a, uint32_t *res)
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 static uint32_t exp_check(uint32_t *n, uint32_t *a, uint32_t bBits, uint32_t *b)
 {
   uint32_t one[128U] = { 0U };
-  memset(one, 0U, (uint32_t)128U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  memset(one, 0U, 128U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m10 = acc0;
   uint32_t m00 = m0 & m10;
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t m1;
-  if (bBits < (uint32_t)32U * bLen)
+  if (bBits < 32U * bLen)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), bLen);
     uint32_t *b2 = (uint32_t *)alloca(bLen * sizeof (uint32_t));
     memset(b2, 0U, bLen * sizeof (uint32_t));
-    uint32_t i0 = bBits / (uint32_t)32U;
-    uint32_t j = bBits % (uint32_t)32U;
-    b2[i0] = b2[i0] | (uint32_t)1U << j;
-    uint32_t acc = (uint32_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+    uint32_t i0 = bBits / 32U;
+    uint32_t j = bBits % 32U;
+    b2[i0] = b2[i0] | 1U << j;
+    uint32_t acc = 0U;
+    for (uint32_t i = 0U; i < bLen; i++)
     {
       uint32_t beq = FStar_UInt32_eq_mask(b[i], b2[i]);
       uint32_t blt = ~FStar_UInt32_gte_mask(b[i], b2[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
     }
     uint32_t res = acc;
     m1 = res;
   }
   else
   {
-    m1 = (uint32_t)0xFFFFFFFFU;
+    m1 = 0xFFFFFFFFU;
   }
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t m = m1 & m2;
@@ -576,7 +576,7 @@ exp_vartime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[128U] = { 0U };
     uint32_t c[256U] = { 0U };
@@ -584,18 +584,18 @@ exp_vartime_precomp(
     reduction(n, mu, c, aM);
     uint32_t resM[128U] = { 0U };
     uint32_t ctx[256U] = { 0U };
-    memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
+    memcpy(ctx, n, 128U * sizeof (uint32_t));
+    memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         amont_mul(ctx_n0, mu, resM, aM, resM);
@@ -604,7 +604,7 @@ exp_vartime_precomp(
       amont_sqr(ctx_n0, mu, aM, aM);
     }
     uint32_t tmp[256U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)128U * sizeof (uint32_t));
+    memcpy(tmp, resM, 128U * sizeof (uint32_t));
     reduction(n, mu, tmp, res);
     return;
   }
@@ -614,74 +614,70 @@ exp_vartime_precomp(
   reduction(n, mu, c, aM);
   uint32_t resM[128U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[256U] = { 0U };
-  memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(ctx, n, 128U * sizeof (uint32_t));
+  memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
   uint32_t table[2048U] = { 0U };
   uint32_t tmp[128U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)128U;
+  uint32_t *t1 = table + 128U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)128U;
+  uint32_t *ctx_r20 = ctx + 128U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(t1, aM, 128U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)128U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 128U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U;
+    memcpy(table + (2U * i + 2U) * 128U, tmp, 128U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 128U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 128U, tmp, 128U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+    uint32_t i = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
     uint32_t bits_l32 = bits_c;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)128U;
-    memcpy(resM, (uint32_t *)a_bits_l, (uint32_t)128U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 128U;
+    memcpy(resM, (uint32_t *)a_bits_l, 128U * sizeof (uint32_t));
   }
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[128U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+  for (uint32_t i = 0U; i < bBits / 4U; i++)
   {
     KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
+    uint32_t k = bBits - bBits % 4U - 4U * i - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
     uint32_t bits_l32 = bits_l;
-    const uint32_t *a_bits_l = table + bits_l32 * (uint32_t)128U;
-    memcpy(tmp0, (uint32_t *)a_bits_l, (uint32_t)128U * sizeof (uint32_t));
+    const uint32_t *a_bits_l = table + bits_l32 * 128U;
+    memcpy(tmp0, (uint32_t *)a_bits_l, 128U * sizeof (uint32_t));
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint32_t tmp1[256U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(tmp1, resM, 128U * sizeof (uint32_t));
   reduction(n, mu, tmp1, res);
 }
 
@@ -696,7 +692,7 @@ exp_consttime_precomp(
   uint32_t *res
 )
 {
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     uint32_t aM[128U] = { 0U };
     uint32_t c[256U] = { 0U };
@@ -704,22 +700,22 @@ exp_consttime_precomp(
     reduction(n, mu, c, aM);
     uint32_t resM[128U] = { 0U };
     uint32_t ctx[256U] = { 0U };
-    memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-    memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    memcpy(ctx, n, 128U * sizeof (uint32_t));
+    memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+      for (uint32_t i = 0U; i < 128U; i++)
       {
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aM[i]);
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aM[i]);
         resM[i] = resM[i] ^ dummy;
         aM[i] = aM[i] ^ dummy;
       }
@@ -730,14 +726,14 @@ exp_consttime_precomp(
       sw = bit;
     }
     uint32_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+    for (uint32_t i = 0U; i < 128U; i++)
     {
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aM[i]);
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aM[i]);
       resM[i] = resM[i] ^ dummy;
       aM[i] = aM[i] ^ dummy;
     }
     uint32_t tmp[256U] = { 0U };
-    memcpy(tmp, resM, (uint32_t)128U * sizeof (uint32_t));
+    memcpy(tmp, resM, 128U * sizeof (uint32_t));
     reduction(n, mu, tmp, res);
     return;
   }
@@ -747,53 +743,49 @@ exp_consttime_precomp(
   reduction(n, mu, c0, aM);
   uint32_t resM[128U] = { 0U };
   uint32_t bLen;
-  if (bBits == (uint32_t)0U)
+  if (bBits == 0U)
   {
-    bLen = (uint32_t)1U;
+    bLen = 1U;
   }
   else
   {
-    bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+    bLen = (bBits - 1U) / 32U + 1U;
   }
   uint32_t ctx[256U] = { 0U };
-  memcpy(ctx, n, (uint32_t)128U * sizeof (uint32_t));
-  memcpy(ctx + (uint32_t)128U, r2, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(ctx, n, 128U * sizeof (uint32_t));
+  memcpy(ctx + 128U, r2, 128U * sizeof (uint32_t));
   uint32_t table[2048U] = { 0U };
   uint32_t tmp[128U] = { 0U };
   uint32_t *t0 = table;
-  uint32_t *t1 = table + (uint32_t)128U;
+  uint32_t *t1 = table + 128U;
   uint32_t *ctx_n0 = ctx;
-  uint32_t *ctx_r20 = ctx + (uint32_t)128U;
+  uint32_t *ctx_r20 = ctx + 128U;
   from(ctx_n0, mu, ctx_r20, t0);
-  memcpy(t1, aM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(t1, aM, 128U * sizeof (uint32_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint32_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)128U;
+    0U,
+    7U,
+    1U,
+    uint32_t *t11 = table + (i + 1U) * 128U;
     uint32_t *ctx_n1 = ctx;
     amont_sqr(ctx_n1, mu, t11, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t));
-    uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)128U;
+    memcpy(table + (2U * i + 2U) * 128U, tmp, 128U * sizeof (uint32_t));
+    uint32_t *t2 = table + (2U * i + 2U) * 128U;
     uint32_t *ctx_n = ctx;
     amont_mul(ctx_n, mu, aM, t2, tmp);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)128U,
-      tmp,
-      (uint32_t)128U * sizeof (uint32_t)););
-  if (bBits % (uint32_t)4U != (uint32_t)0U)
+    memcpy(table + (2U * i + 3U) * 128U, tmp, 128U * sizeof (uint32_t)););
+  if (bBits % 4U != 0U)
   {
-    uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-    memcpy(resM, (uint32_t *)table, (uint32_t)128U * sizeof (uint32_t));
+    uint32_t i0 = bBits / 4U * 4U;
+    uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+    memcpy(resM, (uint32_t *)table, 128U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)128U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 128U;
+      for (uint32_t i = 0U; i < 128U; i++)
       {
         uint32_t *os = resM;
         uint32_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -803,28 +795,28 @@ exp_consttime_precomp(
   else
   {
     uint32_t *ctx_n = ctx;
-    uint32_t *ctx_r2 = ctx + (uint32_t)128U;
+    uint32_t *ctx_r2 = ctx + 128U;
     from(ctx_n, mu, ctx_r2, resM);
   }
   uint32_t tmp0[128U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+  for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
   {
     KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
+      0U,
+      4U,
+      1U,
       uint32_t *ctx_n = ctx;
       amont_sqr(ctx_n, mu, resM, resM););
-    uint32_t k = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, (uint32_t)4U);
-    memcpy(tmp0, (uint32_t *)table, (uint32_t)128U * sizeof (uint32_t));
+    uint32_t k = bBits - bBits % 4U - 4U * i0 - 4U;
+    uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k, 4U);
+    memcpy(tmp0, (uint32_t *)table, 128U * sizeof (uint32_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-      const uint32_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)128U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+      0U,
+      15U,
+      1U,
+      uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+      const uint32_t *res_j = table + (i1 + 1U) * 128U;
+      for (uint32_t i = 0U; i < 128U; i++)
       {
         uint32_t *os = tmp0;
         uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -834,7 +826,7 @@ exp_consttime_precomp(
     amont_mul(ctx_n, mu, resM, tmp0, resM);
   }
   uint32_t tmp1[256U] = { 0U };
-  memcpy(tmp1, resM, (uint32_t)128U * sizeof (uint32_t));
+  memcpy(tmp1, resM, 128U * sizeof (uint32_t));
   reduction(n, mu, tmp1, res);
 }
 
@@ -900,16 +892,16 @@ Hacl_Bignum4096_32_mod_exp_vartime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_vartime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -942,16 +934,16 @@ Hacl_Bignum4096_32_mod_exp_consttime(
 )
 {
   uint32_t is_valid_m = exp_check(n, a, bBits, b);
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     exp_consttime(nBits, n, a, bBits, b, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 /**
@@ -972,22 +964,22 @@ Write `a ^ (-1) mod n` in `res`.
 bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t *res)
 {
   uint32_t one[128U] = { 0U };
-  memset(one, 0U, (uint32_t)128U * sizeof (uint32_t));
-  one[0U] = (uint32_t)1U;
-  uint32_t bit0 = n[0U] & (uint32_t)1U;
-  uint32_t m0 = (uint32_t)0U - bit0;
-  uint32_t acc0 = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  memset(one, 0U, 128U * sizeof (uint32_t));
+  one[0U] = 1U;
+  uint32_t bit0 = n[0U] & 1U;
+  uint32_t m0 = 0U - bit0;
+  uint32_t acc0 = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(one[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m1 = acc0;
   uint32_t m00 = m0 & m1;
   uint32_t bn_zero[128U] = { 0U };
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -995,55 +987,55 @@ bool Hacl_Bignum4096_32_mod_inv_prime_vartime(uint32_t *n, uint32_t *a, uint32_t
   uint32_t mask1 = mask;
   uint32_t res10 = mask1;
   uint32_t m10 = res10;
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], n[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   uint32_t m2 = acc;
   uint32_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
-  if (is_valid_m == (uint32_t)0xFFFFFFFFU)
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
+  if (is_valid_m == 0xFFFFFFFFU)
   {
     uint32_t n2[128U] = { 0U };
-    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, n[0U], (uint32_t)2U, n2);
-    uint32_t *a1 = n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, n[0U], 2U, n2);
+    uint32_t *a1 = n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)31U; i++)
+    for (uint32_t i = 0U; i < 31U; i++)
     {
-      uint32_t t1 = a1[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
     KRML_MAYBE_FOR3(i,
-      (uint32_t)124U,
-      (uint32_t)127U,
-      (uint32_t)1U,
+      124U,
+      127U,
+      1U,
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
     uint32_t c1 = c;
     uint32_t c2 = c1;
-    KRML_HOST_IGNORE(c2);
-    exp_vartime(nBits, n, a, (uint32_t)4096U, n2, res);
+    KRML_MAYBE_UNUSED_VAR(c2);
+    exp_vartime(nBits, n, a, 4096U, n2, res);
   }
   else
   {
-    memset(res, 0U, (uint32_t)128U * sizeof (uint32_t));
+    memset(res, 0U, 128U * sizeof (uint32_t));
   }
-  return is_valid_m == (uint32_t)0xFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFU;
 }
 
 
@@ -1067,16 +1059,16 @@ Heap-allocate and initialize a Montgomery context.
 */
 Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *Hacl_Bignum4096_32_mont_ctx_init(uint32_t *n)
 {
-  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint32_t));
-  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint32_t));
+  uint32_t *r2 = (uint32_t *)KRML_HOST_CALLOC(128U, sizeof (uint32_t));
+  uint32_t *n1 = (uint32_t *)KRML_HOST_CALLOC(128U, sizeof (uint32_t));
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
-  memcpy(n11, n, (uint32_t)128U * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32((uint32_t)128U, n);
+  memcpy(n11, n, 128U * sizeof (uint32_t));
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(128U, n);
   precompr2(nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
-  res = { .len = (uint32_t)128U, .n = n11, .mu = mu, .r2 = r21 };
+  res = { .len = 128U, .n = n11, .mu = mu, .r2 = r21 };
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   *buf =
     (Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 *)KRML_HOST_MALLOC(sizeof (
@@ -1204,36 +1196,36 @@ Hacl_Bignum4096_32_mod_inv_prime_vartime_precomp(
 {
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 k1 = *k;
   uint32_t n2[128U] = { 0U };
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
-  uint32_t *a1 = k1.n + (uint32_t)1U;
-  uint32_t *res1 = n2 + (uint32_t)1U;
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
+  uint32_t *a1 = k1.n + 1U;
+  uint32_t *res1 = n2 + 1U;
   uint32_t c = c0;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)31U; i++)
+  for (uint32_t i = 0U; i < 31U; i++)
   {
-    uint32_t t1 = a1[(uint32_t)4U * i];
-    uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-    uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-    uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-    uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-    uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-    uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-    uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+    uint32_t t1 = a1[4U * i];
+    uint32_t *res_i0 = res1 + 4U * i;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+    uint32_t t10 = a1[4U * i + 1U];
+    uint32_t *res_i1 = res1 + 4U * i + 1U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+    uint32_t t11 = a1[4U * i + 2U];
+    uint32_t *res_i2 = res1 + 4U * i + 2U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+    uint32_t t12 = a1[4U * i + 3U];
+    uint32_t *res_i = res1 + 4U * i + 3U;
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
   }
   KRML_MAYBE_FOR3(i,
-    (uint32_t)124U,
-    (uint32_t)127U,
-    (uint32_t)1U,
+    124U,
+    127U,
+    1U,
     uint32_t t1 = a1[i];
     uint32_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i););
   uint32_t c1 = c;
   uint32_t c2 = c1;
-  KRML_HOST_IGNORE(c2);
-  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, (uint32_t)4096U, n2, res);
+  KRML_MAYBE_UNUSED_VAR(c2);
+  exp_vartime_precomp(k1.n, k1.mu, k1.r2, a, 4096U, n2, res);
 }
 
 
@@ -1255,36 +1247,28 @@ Load a big-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint32_t *os = res2;
-    uint32_t u = load32_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)4U);
+    uint32_t u = load32_be(tmp + (bnLen - i - 1U) * 4U);
     uint32_t x = u;
     os[i] = x;
   }
@@ -1304,36 +1288,28 @@ Load a little-endian bignum from memory.
 */
 uint32_t *Hacl_Bignum4096_32_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U <= (uint32_t)1073741823U)
-  )
+  if (len == 0U || !((len - 1U) / 4U + 1U <= 1073741823U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint32_t), (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U);
-  uint32_t
-  *res =
-    (uint32_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U,
-      sizeof (uint32_t));
+  KRML_CHECK_SIZE(sizeof (uint32_t), (len - 1U) / 4U + 1U);
+  uint32_t *res = (uint32_t *)KRML_HOST_CALLOC((len - 1U) / 4U + 1U, sizeof (uint32_t));
   if (res == NULL)
   {
     return res;
   }
   uint32_t *res1 = res;
   uint32_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)4U * bnLen;
+  uint32_t bnLen = (len - 1U) / 4U + 1U;
+  uint32_t tmpLen = 4U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 4U + 1U; i++)
   {
     uint32_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)4U;
+    uint8_t *bj = tmp + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r1 = u;
     uint32_t x = r1;
@@ -1351,10 +1327,10 @@ Serialize a bignum into big-endian memory.
 void Hacl_Bignum4096_32_bn_to_bytes_be(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 128U; i++)
   {
-    store32_be(res + i * (uint32_t)4U, b[(uint32_t)128U - i - (uint32_t)1U]);
+    store32_be(res + i * 4U, b[128U - i - 1U]);
   }
 }
 
@@ -1367,10 +1343,10 @@ Serialize a bignum into little-endian memory.
 void Hacl_Bignum4096_32_bn_to_bytes_le(uint32_t *b, uint8_t *res)
 {
   uint8_t tmp[512U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  for (uint32_t i = 0U; i < 128U; i++)
   {
-    store32_le(res + i * (uint32_t)4U, b[i]);
+    store32_le(res + i * 4U, b[i]);
   }
 }
 
@@ -1387,12 +1363,12 @@ Returns 2^32 - 1 if a < b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum4096_32_lt_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t acc = (uint32_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t acc = 0U;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t beq = FStar_UInt32_eq_mask(a[i], b[i]);
     uint32_t blt = ~FStar_UInt32_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint32_t)0xFFFFFFFFU) | (~blt & (uint32_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFU) | (~blt & 0U)));
   }
   return acc;
 }
@@ -1404,8 +1380,8 @@ Returns 2^32 - 1 if a = b, otherwise returns 0.
 */
 uint32_t Hacl_Bignum4096_32_eq_mask(uint32_t *a, uint32_t *b)
 {
-  uint32_t mask = (uint32_t)0xFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  uint32_t mask = 0xFFFFFFFFU;
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     uint32_t uu____0 = FStar_UInt32_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
diff --git a/src/msvc/Hacl_Bignum64.c b/src/msvc/Hacl_Bignum64.c
index 9e701c7b..7d443618 100644
--- a/src/msvc/Hacl_Bignum64.c
+++ b/src/msvc/Hacl_Bignum64.c
@@ -104,9 +104,9 @@ Write `a * b` in `res`.
 */
 void Hacl_Bignum64_mul(uint32_t len, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a, b, tmp, res);
 }
 
@@ -118,9 +118,9 @@ Write `a * a` in `res`.
 */
 void Hacl_Bignum64_sqr(uint32_t len, uint64_t *a, uint64_t *res)
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_sqr_uint64(len, a, tmp, res);
 }
 
@@ -141,28 +141,28 @@ bn_slow_precomp(
   uint64_t *a1 = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(a1, 0U, (len + len) * sizeof (uint64_t));
   memcpy(a1, a, (len + len) * sizeof (uint64_t));
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < len; i0++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i0 = 0U; i0 < len; i0++)
   {
     uint64_t qj = mu * a1[i0];
     uint64_t *res_j0 = a1 + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = n[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * i;
+      uint64_t a_i = n[4U * i];
+      uint64_t *res_i0 = res_j0 + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * i + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * i + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * i + 3U];
+      uint64_t *res_i = res_j0 + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = n[i];
       uint64_t *res_i = res_j0 + i;
@@ -180,9 +180,9 @@ bn_slow_precomp(
   uint64_t *tmp0 = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(tmp0, 0U, len * sizeof (uint64_t));
   uint64_t c1 = Hacl_Bignum_Addition_bn_sub_eq_len_u64(len, a_mod, n, tmp0);
-  KRML_HOST_IGNORE(c1);
-  uint64_t m = (uint64_t)0U - c00;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t m = 0ULL - c00;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t *os = a_mod;
     uint64_t x = (m & tmp0[i]) | (~m & a_mod[i]);
@@ -191,9 +191,9 @@ bn_slow_precomp(
   KRML_CHECK_SIZE(sizeof (uint64_t), len + len);
   uint64_t *c = (uint64_t *)alloca((len + len) * sizeof (uint64_t));
   memset(c, 0U, (len + len) * sizeof (uint64_t));
-  KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)4U * len);
-  uint64_t *tmp = (uint64_t *)alloca((uint32_t)4U * len * sizeof (uint64_t));
-  memset(tmp, 0U, (uint32_t)4U * len * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), 4U * len);
+  uint64_t *tmp = (uint64_t *)alloca(4U * len * sizeof (uint64_t));
+  memset(tmp, 0U, 4U * len * sizeof (uint64_t));
   Hacl_Bignum_Karatsuba_bn_karatsuba_mul_uint64(len, a_mod, r2, tmp, c);
   Hacl_Bignum_Montgomery_bn_mont_reduction_u64(len, n, mu, c, res);
 }
@@ -215,20 +215,20 @@ bool Hacl_Bignum64_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res)
   uint64_t *one = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   uint64_t is_valid_m = m0 & m1;
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t *r2 = (uint64_t *)alloca(len * sizeof (uint64_t));
@@ -241,7 +241,7 @@ bool Hacl_Bignum64_mod(uint32_t len, uint64_t *n, uint64_t *a, uint64_t *res)
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -275,8 +275,8 @@ Hacl_Bignum64_mod_exp_vartime(
 )
 {
   uint64_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len, nBits, n, a, bBits, b, res);
   }
@@ -284,7 +284,7 @@ Hacl_Bignum64_mod_exp_vartime(
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -318,8 +318,8 @@ Hacl_Bignum64_mod_exp_consttime(
 )
 {
   uint64_t is_valid_m = Hacl_Bignum_Exponentiation_bn_check_mod_exp_u64(len, n, a, bBits, b);
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_u64(len, nBits, n, a, bBits, b, res);
   }
@@ -327,7 +327,7 @@ Hacl_Bignum64_mod_exp_consttime(
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -352,23 +352,23 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a,
   uint64_t *one = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
   memset(one, 0U, len * sizeof (uint64_t));
-  one[0U] = (uint64_t)1U;
-  uint64_t bit0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bit0;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  one[0U] = 1ULL;
+  uint64_t bit0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bit0;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(one[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(one[i], n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc0;
   uint64_t m00 = m0 & m1;
   KRML_CHECK_SIZE(sizeof (uint64_t), len);
   uint64_t *bn_zero = (uint64_t *)alloca(len * sizeof (uint64_t));
   memset(bn_zero, 0U, len * sizeof (uint64_t));
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -376,53 +376,48 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a,
   uint64_t mask1 = mask;
   uint64_t res10 = mask1;
   uint64_t m10 = res10;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m2 = acc;
   uint64_t is_valid_m = (m00 & ~m10) & m2;
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
-  if (is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  if (is_valid_m == 0xFFFFFFFFFFFFFFFFULL)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len);
     uint64_t *n2 = (uint64_t *)alloca(len * sizeof (uint64_t));
     memset(n2, 0U, len * sizeof (uint64_t));
-    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, n[0U], (uint64_t)2U, n2);
+    uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, n[0U], 2ULL, n2);
     uint64_t c1;
-    if ((uint32_t)1U < len)
+    if (1U < len)
     {
-      uint64_t *a1 = n + (uint32_t)1U;
-      uint64_t *res1 = n2 + (uint32_t)1U;
+      uint64_t *a1 = n + 1U;
+      uint64_t *res1 = n2 + 1U;
       uint64_t c = c0;
-      for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)4U; i++)
+      for (uint32_t i = 0U; i < (len - 1U) / 4U; i++)
       {
-        uint64_t t1 = a1[(uint32_t)4U * i];
-        uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-        uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-        uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-        uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-        uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-        uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-        uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+        uint64_t t1 = a1[4U * i];
+        uint64_t *res_i0 = res1 + 4U * i;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+        uint64_t t10 = a1[4U * i + 1U];
+        uint64_t *res_i1 = res1 + 4U * i + 1U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+        uint64_t t11 = a1[4U * i + 2U];
+        uint64_t *res_i2 = res1 + 4U * i + 2U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+        uint64_t t12 = a1[4U * i + 3U];
+        uint64_t *res_i = res1 + 4U * i + 3U;
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
       }
-      for
-      (uint32_t
-        i = (len - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-        i
-        < len - (uint32_t)1U;
-        i++)
+      for (uint32_t i = (len - 1U) / 4U * 4U; i < len - 1U; i++)
       {
         uint64_t t1 = a1[i];
         uint64_t *res_i = res1 + i;
-        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+        c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
       }
       uint64_t c10 = c;
       c1 = c10;
@@ -431,20 +426,14 @@ bool Hacl_Bignum64_mod_inv_prime_vartime(uint32_t len, uint64_t *n, uint64_t *a,
     {
       c1 = c0;
     }
-    KRML_HOST_IGNORE(c1);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len,
-      nBits,
-      n,
-      a,
-      (uint32_t)64U * len,
-      n2,
-      res);
+    KRML_MAYBE_UNUSED_VAR(c1);
+    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_u64(len, nBits, n, a, 64U * len, n2, res);
   }
   else
   {
     memset(res, 0U, len * sizeof (uint64_t));
   }
-  return is_valid_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_valid_m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 
@@ -476,7 +465,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint64_t));
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -631,38 +620,33 @@ Hacl_Bignum64_mod_inv_prime_vartime_precomp(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t *n2 = (uint64_t *)alloca(len1 * sizeof (uint64_t));
   memset(n2, 0U, len1 * sizeof (uint64_t));
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
   uint64_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint64_t *a1 = k1.n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t *a1 = k1.n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c10 = c;
     c1 = c10;
@@ -671,13 +655,13 @@ Hacl_Bignum64_mod_inv_prime_vartime_precomp(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
+  KRML_MAYBE_UNUSED_VAR(c1);
   Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64(len1,
     k1.n,
     k1.mu,
     k1.r2,
     a,
-    (uint32_t)64U * len1,
+    64U * len1,
     n2,
     res);
 }
@@ -701,36 +685,28 @@ Load a big-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum64_new_bn_from_bytes_be(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp + tmpLen - len, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
     uint64_t *os = res2;
-    uint64_t u = load64_be(tmp + (bnLen - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(tmp + (bnLen - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;
   }
@@ -750,36 +726,28 @@ Load a little-endian bignum from memory.
 */
 uint64_t *Hacl_Bignum64_new_bn_from_bytes_le(uint32_t len, uint8_t *b)
 {
-  if
-  (
-    len
-    == (uint32_t)0U
-    || !((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U <= (uint32_t)536870911U)
-  )
+  if (len == 0U || !((len - 1U) / 8U + 1U <= 536870911U))
   {
     return NULL;
   }
-  KRML_CHECK_SIZE(sizeof (uint64_t), (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U);
-  uint64_t
-  *res =
-    (uint64_t *)KRML_HOST_CALLOC((len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U,
-      sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), (len - 1U) / 8U + 1U);
+  uint64_t *res = (uint64_t *)KRML_HOST_CALLOC((len - 1U) / 8U + 1U, sizeof (uint64_t));
   if (res == NULL)
   {
     return res;
   }
   uint64_t *res1 = res;
   uint64_t *res2 = res1;
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
   memcpy(tmp, b, len * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < (len - 1U) / 8U + 1U; i++)
   {
     uint64_t *os = res2;
-    uint8_t *bj = tmp + i * (uint32_t)8U;
+    uint8_t *bj = tmp + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r1 = u;
     uint64_t x = r1;
@@ -796,14 +764,14 @@ Serialize a bignum into big-endian memory.
 */
 void Hacl_Bignum64_bn_to_bytes_be(uint32_t len, uint64_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store64_be(tmp + i * (uint32_t)8U, b[bnLen - i - (uint32_t)1U]);
+    store64_be(tmp + i * 8U, b[bnLen - i - 1U]);
   }
   memcpy(res, tmp + tmpLen - len, len * sizeof (uint8_t));
 }
@@ -816,14 +784,14 @@ Serialize a bignum into little-endian memory.
 */
 void Hacl_Bignum64_bn_to_bytes_le(uint32_t len, uint64_t *b, uint8_t *res)
 {
-  uint32_t bnLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t tmpLen = (uint32_t)8U * bnLen;
+  uint32_t bnLen = (len - 1U) / 8U + 1U;
+  uint32_t tmpLen = 8U * bnLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), tmpLen);
   uint8_t *tmp = (uint8_t *)alloca(tmpLen * sizeof (uint8_t));
   memset(tmp, 0U, tmpLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < bnLen; i++)
+  for (uint32_t i = 0U; i < bnLen; i++)
   {
-    store64_le(tmp + i * (uint32_t)8U, b[i]);
+    store64_le(tmp + i * 8U, b[i]);
   }
   memcpy(res, tmp, len * sizeof (uint8_t));
 }
@@ -841,12 +809,12 @@ Returns 2^64 - 1 if a < b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum64_lt_mask(uint32_t len, uint64_t *a, uint64_t *b)
 {
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(a[i], b[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(a[i], b[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   return acc;
 }
@@ -858,8 +826,8 @@ Returns 2^64 - 1 if a = b, otherwise returns 0.
 */
 uint64_t Hacl_Bignum64_eq_mask(uint32_t len, uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;
diff --git a/src/msvc/Hacl_Chacha20.c b/src/msvc/Hacl_Chacha20.c
index 8966e19e..38a5c373 100644
--- a/src/msvc/Hacl_Chacha20.c
+++ b/src/msvc/Hacl_Chacha20.c
@@ -28,7 +28,7 @@
 const
 uint32_t
 Hacl_Impl_Chacha20_Vec_chacha20_constants[4U] =
-  { (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
+  { 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U };
 
 static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
 {
@@ -37,7 +37,7 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std0 = st[d];
   uint32_t sta10 = sta + stb0;
   uint32_t std10 = std0 ^ sta10;
-  uint32_t std2 = std10 << (uint32_t)16U | std10 >> (uint32_t)16U;
+  uint32_t std2 = std10 << 16U | std10 >> 16U;
   st[a] = sta10;
   st[d] = std2;
   uint32_t sta0 = st[c];
@@ -45,7 +45,7 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std3 = st[b];
   uint32_t sta11 = sta0 + stb1;
   uint32_t std11 = std3 ^ sta11;
-  uint32_t std20 = std11 << (uint32_t)12U | std11 >> (uint32_t)20U;
+  uint32_t std20 = std11 << 12U | std11 >> 20U;
   st[c] = sta11;
   st[b] = std20;
   uint32_t sta2 = st[a];
@@ -53,7 +53,7 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std4 = st[d];
   uint32_t sta12 = sta2 + stb2;
   uint32_t std12 = std4 ^ sta12;
-  uint32_t std21 = std12 << (uint32_t)8U | std12 >> (uint32_t)24U;
+  uint32_t std21 = std12 << 8U | std12 >> 24U;
   st[a] = sta12;
   st[d] = std21;
   uint32_t sta3 = st[c];
@@ -61,21 +61,21 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t std = st[b];
   uint32_t sta1 = sta3 + stb;
   uint32_t std1 = std ^ sta1;
-  uint32_t std22 = std1 << (uint32_t)7U | std1 >> (uint32_t)25U;
+  uint32_t std22 = std1 << 7U | std1 >> 25U;
   st[c] = sta1;
   st[b] = std22;
 }
 
 static inline void double_round(uint32_t *st)
 {
-  quarter_round(st, (uint32_t)0U, (uint32_t)4U, (uint32_t)8U, (uint32_t)12U);
-  quarter_round(st, (uint32_t)1U, (uint32_t)5U, (uint32_t)9U, (uint32_t)13U);
-  quarter_round(st, (uint32_t)2U, (uint32_t)6U, (uint32_t)10U, (uint32_t)14U);
-  quarter_round(st, (uint32_t)3U, (uint32_t)7U, (uint32_t)11U, (uint32_t)15U);
-  quarter_round(st, (uint32_t)0U, (uint32_t)5U, (uint32_t)10U, (uint32_t)15U);
-  quarter_round(st, (uint32_t)1U, (uint32_t)6U, (uint32_t)11U, (uint32_t)12U);
-  quarter_round(st, (uint32_t)2U, (uint32_t)7U, (uint32_t)8U, (uint32_t)13U);
-  quarter_round(st, (uint32_t)3U, (uint32_t)4U, (uint32_t)9U, (uint32_t)14U);
+  quarter_round(st, 0U, 4U, 8U, 12U);
+  quarter_round(st, 1U, 5U, 9U, 13U);
+  quarter_round(st, 2U, 6U, 10U, 14U);
+  quarter_round(st, 3U, 7U, 11U, 15U);
+  quarter_round(st, 0U, 5U, 10U, 15U);
+  quarter_round(st, 1U, 6U, 11U, 12U);
+  quarter_round(st, 2U, 7U, 8U, 13U);
+  quarter_round(st, 3U, 4U, 9U, 14U);
 }
 
 static inline void rounds(uint32_t *st)
@@ -94,14 +94,14 @@ static inline void rounds(uint32_t *st)
 
 static inline void chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t));
+  memcpy(k, ctx, 16U * sizeof (uint32_t));
   uint32_t ctr_u32 = ctr;
   k[12U] = k[12U] + ctr_u32;
   rounds(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = k;
     uint32_t x = k[i] + ctx[i];
     os[i] = x;);
@@ -110,35 +110,34 @@ static inline void chacha20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 
 static const
 uint32_t
-chacha20_constants[4U] =
-  { (uint32_t)0x61707865U, (uint32_t)0x3320646eU, (uint32_t)0x79622d32U, (uint32_t)0x6b206574U };
+chacha20_constants[4U] = { 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U };
 
 void Hacl_Impl_Chacha20_chacha20_init(uint32_t *ctx, uint8_t *k, uint8_t *n, uint32_t ctr)
 {
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx;
     uint32_t x = chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
@@ -151,27 +150,23 @@ static void chacha20_encrypt_block(uint32_t *ctx, uint8_t *out, uint32_t incr, u
   chacha20_core(k, ctx, incr);
   uint32_t bl[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = bl;
-    uint8_t *bj = text + i * (uint32_t)4U;
+    uint8_t *bj = text + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = bl;
     uint32_t x = bl[i] ^ k[i];
     os[i] = x;);
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    store32_le(out + i * (uint32_t)4U, bl[i]););
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(out + i * 4U, bl[i]););
 }
 
 static inline void
@@ -186,16 +181,16 @@ chacha20_encrypt_last(uint32_t *ctx, uint32_t len, uint8_t *out, uint32_t incr,
 void
 Hacl_Impl_Chacha20_chacha20_update(uint32_t *ctx, uint32_t len, uint8_t *out, uint8_t *text)
 {
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    chacha20_encrypt_block(ctx, out + i * (uint32_t)64U, i, text + i * (uint32_t)64U);
+    chacha20_encrypt_block(ctx, out + i * 64U, i, text + i * 64U);
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    chacha20_encrypt_last(ctx, rem, out + nb * (uint32_t)64U, nb, text + nb * (uint32_t)64U);
+    chacha20_encrypt_last(ctx, rem, out + nb * 64U, nb, text + nb * 64U);
   }
 }
 
diff --git a/src/msvc/Hacl_Chacha20_Vec128.c b/src/msvc/Hacl_Chacha20_Vec128.c
index 1e0c4ec1..deab1dfc 100644
--- a/src/msvc/Hacl_Chacha20_Vec128.c
+++ b/src/msvc/Hacl_Chacha20_Vec128.c
@@ -32,100 +32,100 @@ static inline void double_round_128(Lib_IntVector_Intrinsics_vec128 *st)
 {
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std0 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std0, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std0, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std1 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std1, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std1, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std2 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std2, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std2, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std3 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std3, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std3, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std4 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std4, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std4, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std5 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std5, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std5, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std6 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std6, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std6, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std7 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std7, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std7, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std8 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std8, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std8, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std9 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std9, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std9, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std10 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std10, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std10, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std11 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std11, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std11, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std12 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std12, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std12, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std13 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std13, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std13, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std14 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std14, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std14, 7U);
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std15 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std15, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std15, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std16 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std16, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std16, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec128_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec128 std17 = Lib_IntVector_Intrinsics_vec128_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std17, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std17, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec128_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec128 std18 = Lib_IntVector_Intrinsics_vec128_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std18, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std18, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std19 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std19, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std19, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std20 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std20, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std20, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec128_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec128 std21 = Lib_IntVector_Intrinsics_vec128_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std21, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std21, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec128_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec128 std22 = Lib_IntVector_Intrinsics_vec128_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std22, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std22, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std23 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std23, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std23, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std24 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std24, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std24, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec128_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec128 std25 = Lib_IntVector_Intrinsics_vec128_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std25, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std25, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec128_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec128 std26 = Lib_IntVector_Intrinsics_vec128_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std26, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std26, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std27 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std27, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std27, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std28 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std28, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std28, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec128_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec128 std29 = Lib_IntVector_Intrinsics_vec128_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std29, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std29, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec128_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec128 std30 = Lib_IntVector_Intrinsics_vec128_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std30, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec128_rotate_left32(std30, 7U);
 }
 
 static inline void
@@ -135,8 +135,8 @@ chacha20_core_128(
   uint32_t ctr
 )
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  uint32_t ctr_u32 = (uint32_t)4U * ctr;
+  memcpy(k, ctx, 16U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  uint32_t ctr_u32 = 4U * ctr;
   Lib_IntVector_Intrinsics_vec128 cv = Lib_IntVector_Intrinsics_vec128_load32(ctr_u32);
   k[12U] = Lib_IntVector_Intrinsics_vec128_add32(k[12U], cv);
   double_round_128(k);
@@ -150,9 +150,9 @@ chacha20_core_128(
   double_round_128(k);
   double_round_128(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = k;
     Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_add32(k[i], ctx[i]);
     os[i] = x;);
@@ -164,47 +164,42 @@ chacha20_init_128(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *k, uint8_t *n,
 {
   uint32_t ctx1[16U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx1;
     uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx1 + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx1[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx1 + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = ctx;
     uint32_t x = ctx1[i];
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_load32(x);
     os[i] = x0;);
-  Lib_IntVector_Intrinsics_vec128
-  ctr1 =
-    Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)0U,
-      (uint32_t)1U,
-      (uint32_t)2U,
-      (uint32_t)3U);
+  Lib_IntVector_Intrinsics_vec128 ctr1 = Lib_IntVector_Intrinsics_vec128_load32s(0U, 1U, 2U, 3U);
   Lib_IntVector_Intrinsics_vec128 c12 = ctx[12U];
   ctx[12U] = Lib_IntVector_Intrinsics_vec128_add32(c12, ctr1);
 }
@@ -221,13 +216,13 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[16U] KRML_POST_ALIGN(16) = { 0U };
   chacha20_init_128(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)256U;
-  uint32_t nb = len / (uint32_t)256U;
-  uint32_t rem1 = len % (uint32_t)256U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 256U;
+  uint32_t nb = len / 256U;
+  uint32_t rem1 = len % 256U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)256U;
-    uint8_t *uu____1 = text + i * (uint32_t)256U;
+    uint8_t *uu____0 = out + i * 256U;
+    uint8_t *uu____1 = text + i * 256U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, i);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -359,19 +354,19 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * 16U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)256U;
+    uint8_t *uu____2 = out + nb * 256U;
     uint8_t plain[256U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)256U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 256U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -503,13 +498,13 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * 16U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -526,13 +521,13 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[16U] KRML_POST_ALIGN(16) = { 0U };
   chacha20_init_128(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)256U;
-  uint32_t nb = len / (uint32_t)256U;
-  uint32_t rem1 = len % (uint32_t)256U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 256U;
+  uint32_t nb = len / 256U;
+  uint32_t rem1 = len % 256U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)256U;
-    uint8_t *uu____1 = cipher + i * (uint32_t)256U;
+    uint8_t *uu____0 = out + i * 256U;
+    uint8_t *uu____1 = cipher + i * 256U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, i);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -664,19 +659,19 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(uu____1 + i0 * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(uu____0 + i0 * 16U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)256U;
+    uint8_t *uu____2 = out + nb * 256U;
     uint8_t plain[256U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)256U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 256U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U };
     chacha20_core_128(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec128 st0 = k[0U];
@@ -808,13 +803,13 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128(
     k[14U] = v11;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec128
-      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * (uint32_t)16U);
+      x = Lib_IntVector_Intrinsics_vec128_load32_le(plain + i * 16U);
       Lib_IntVector_Intrinsics_vec128 y = Lib_IntVector_Intrinsics_vec128_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * (uint32_t)16U, y););
+      Lib_IntVector_Intrinsics_vec128_store32_le(plain + i * 16U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
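Note (not part of the patch): the double_round_128/256/32 functions above unroll the ChaCha20 quarter-round (add, xor, rotate by 16, 12, 8, 7) over four column index sets and then four diagonal index sets of the 16-word state. The scalar sketch below is for reference only; rotl32 and quarter_round are illustrative names, not HACL* identifiers.

#include <stdint.h>

/* Rotate a 32-bit word left by n bits (0 < n < 32). */
static inline uint32_t rotl32(uint32_t x, uint32_t n)
{
  return x << n | x >> (32U - n);
}

/* One ChaCha20 quarter-round on state indices a, b, c, d. double_round_128
   applies this pattern to the columns (0,4,8,12)...(3,7,11,15) and then to
   the diagonals (0,5,10,15), (1,6,11,12), (2,7,8,13), (3,4,9,14). */
static inline void quarter_round(uint32_t st[16U], uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
  st[a] = st[a] + st[b]; st[d] = rotl32(st[d] ^ st[a], 16U);
  st[c] = st[c] + st[d]; st[b] = rotl32(st[b] ^ st[c], 12U);
  st[a] = st[a] + st[b]; st[d] = rotl32(st[d] ^ st[a], 8U);
  st[c] = st[c] + st[d]; st[b] = rotl32(st[b] ^ st[c], 7U);
}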
diff --git a/src/msvc/Hacl_Chacha20_Vec256.c b/src/msvc/Hacl_Chacha20_Vec256.c
index 620f5040..e61a7cfe 100644
--- a/src/msvc/Hacl_Chacha20_Vec256.c
+++ b/src/msvc/Hacl_Chacha20_Vec256.c
@@ -32,100 +32,100 @@ static inline void double_round_256(Lib_IntVector_Intrinsics_vec256 *st)
 {
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std0 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std0, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std0, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std1 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[0U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std1, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std1, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std2 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[8U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std2, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std2, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std3 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std3, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std3, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std4 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std4, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std4, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std5 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[1U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std5, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std5, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std6 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[9U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std6, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std6, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std7 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std7, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std7, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std8 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std8, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std8, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std9 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[2U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std9, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std9, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std10 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[10U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std10, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std10, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std11 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std11, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std11, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std12 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std12, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std12, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std13 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[3U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std13, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std13, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std14 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[11U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std14, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std14, 7U);
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std15 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std15, (uint32_t)16U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std15, 16U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std16 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std16, (uint32_t)12U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std16, 12U);
   st[0U] = Lib_IntVector_Intrinsics_vec256_add32(st[0U], st[5U]);
   Lib_IntVector_Intrinsics_vec256 std17 = Lib_IntVector_Intrinsics_vec256_xor(st[15U], st[0U]);
-  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std17, (uint32_t)8U);
+  st[15U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std17, 8U);
   st[10U] = Lib_IntVector_Intrinsics_vec256_add32(st[10U], st[15U]);
   Lib_IntVector_Intrinsics_vec256 std18 = Lib_IntVector_Intrinsics_vec256_xor(st[5U], st[10U]);
-  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std18, (uint32_t)7U);
+  st[5U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std18, 7U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std19 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std19, (uint32_t)16U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std19, 16U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std20 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std20, (uint32_t)12U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std20, 12U);
   st[1U] = Lib_IntVector_Intrinsics_vec256_add32(st[1U], st[6U]);
   Lib_IntVector_Intrinsics_vec256 std21 = Lib_IntVector_Intrinsics_vec256_xor(st[12U], st[1U]);
-  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std21, (uint32_t)8U);
+  st[12U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std21, 8U);
   st[11U] = Lib_IntVector_Intrinsics_vec256_add32(st[11U], st[12U]);
   Lib_IntVector_Intrinsics_vec256 std22 = Lib_IntVector_Intrinsics_vec256_xor(st[6U], st[11U]);
-  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std22, (uint32_t)7U);
+  st[6U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std22, 7U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std23 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std23, (uint32_t)16U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std23, 16U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std24 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std24, (uint32_t)12U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std24, 12U);
   st[2U] = Lib_IntVector_Intrinsics_vec256_add32(st[2U], st[7U]);
   Lib_IntVector_Intrinsics_vec256 std25 = Lib_IntVector_Intrinsics_vec256_xor(st[13U], st[2U]);
-  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std25, (uint32_t)8U);
+  st[13U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std25, 8U);
   st[8U] = Lib_IntVector_Intrinsics_vec256_add32(st[8U], st[13U]);
   Lib_IntVector_Intrinsics_vec256 std26 = Lib_IntVector_Intrinsics_vec256_xor(st[7U], st[8U]);
-  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std26, (uint32_t)7U);
+  st[7U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std26, 7U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std27 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std27, (uint32_t)16U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std27, 16U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std28 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std28, (uint32_t)12U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std28, 12U);
   st[3U] = Lib_IntVector_Intrinsics_vec256_add32(st[3U], st[4U]);
   Lib_IntVector_Intrinsics_vec256 std29 = Lib_IntVector_Intrinsics_vec256_xor(st[14U], st[3U]);
-  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std29, (uint32_t)8U);
+  st[14U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std29, 8U);
   st[9U] = Lib_IntVector_Intrinsics_vec256_add32(st[9U], st[14U]);
   Lib_IntVector_Intrinsics_vec256 std30 = Lib_IntVector_Intrinsics_vec256_xor(st[4U], st[9U]);
-  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std30, (uint32_t)7U);
+  st[4U] = Lib_IntVector_Intrinsics_vec256_rotate_left32(std30, 7U);
 }
 
 static inline void
@@ -135,8 +135,8 @@ chacha20_core_256(
   uint32_t ctr
 )
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  uint32_t ctr_u32 = (uint32_t)8U * ctr;
+  memcpy(k, ctx, 16U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  uint32_t ctr_u32 = 8U * ctr;
   Lib_IntVector_Intrinsics_vec256 cv = Lib_IntVector_Intrinsics_vec256_load32(ctr_u32);
   k[12U] = Lib_IntVector_Intrinsics_vec256_add32(k[12U], cv);
   double_round_256(k);
@@ -150,9 +150,9 @@ chacha20_core_256(
   double_round_256(k);
   double_round_256(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = k;
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_add32(k[i], ctx[i]);
     os[i] = x;);
@@ -164,51 +164,43 @@ chacha20_init_256(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *k, uint8_t *n,
 {
   uint32_t ctx1[16U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx1;
     uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx1 + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx1[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx1 + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = ctx;
     uint32_t x = ctx1[i];
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_load32(x);
     os[i] = x0;);
   Lib_IntVector_Intrinsics_vec256
-  ctr1 =
-    Lib_IntVector_Intrinsics_vec256_load32s((uint32_t)0U,
-      (uint32_t)1U,
-      (uint32_t)2U,
-      (uint32_t)3U,
-      (uint32_t)4U,
-      (uint32_t)5U,
-      (uint32_t)6U,
-      (uint32_t)7U);
+  ctr1 = Lib_IntVector_Intrinsics_vec256_load32s(0U, 1U, 2U, 3U, 4U, 5U, 6U, 7U);
   Lib_IntVector_Intrinsics_vec256 c12 = ctx[12U];
   ctx[12U] = Lib_IntVector_Intrinsics_vec256_add32(c12, ctr1);
 }
@@ -225,13 +217,13 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[16U] KRML_POST_ALIGN(32) = { 0U };
   chacha20_init_256(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)512U;
-  uint32_t nb = len / (uint32_t)512U;
-  uint32_t rem1 = len % (uint32_t)512U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 512U;
+  uint32_t nb = len / 512U;
+  uint32_t rem1 = len % 512U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)512U;
-    uint8_t *uu____1 = text + i * (uint32_t)512U;
+    uint8_t *uu____0 = out + i * 512U;
+    uint8_t *uu____1 = text + i * 512U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, i);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -459,19 +451,19 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * 32U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)512U;
+    uint8_t *uu____2 = out + nb * 512U;
     uint8_t plain[512U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)512U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 512U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -699,13 +691,13 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * 32U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -722,13 +714,13 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[16U] KRML_POST_ALIGN(32) = { 0U };
   chacha20_init_256(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)512U;
-  uint32_t nb = len / (uint32_t)512U;
-  uint32_t rem1 = len % (uint32_t)512U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t rem = len % 512U;
+  uint32_t nb = len / 512U;
+  uint32_t rem1 = len % 512U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *uu____0 = out + i * (uint32_t)512U;
-    uint8_t *uu____1 = cipher + i * (uint32_t)512U;
+    uint8_t *uu____0 = out + i * 512U;
+    uint8_t *uu____1 = cipher + i * 512U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, i);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -956,19 +948,19 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i0,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(uu____1 + i0 * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i0]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(uu____0 + i0 * 32U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)512U;
+    uint8_t *uu____2 = out + nb * 512U;
     uint8_t plain[512U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)512U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 512U, rem * sizeof (uint8_t));
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U };
     chacha20_core_256(k, ctx, nb);
     Lib_IntVector_Intrinsics_vec256 st0 = k[0U];
@@ -1196,13 +1188,13 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256(
     k[14U] = v7;
     k[15U] = v15;
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       Lib_IntVector_Intrinsics_vec256
-      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * (uint32_t)32U);
+      x = Lib_IntVector_Intrinsics_vec256_load32_le(plain + i * 32U);
       Lib_IntVector_Intrinsics_vec256 y = Lib_IntVector_Intrinsics_vec256_xor(x, k[i]);
-      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * (uint32_t)32U, y););
+      Lib_IntVector_Intrinsics_vec256_store32_le(plain + i * 32U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
diff --git a/src/msvc/Hacl_Chacha20_Vec32.c b/src/msvc/Hacl_Chacha20_Vec32.c
index 2bf4764c..0dce915c 100644
--- a/src/msvc/Hacl_Chacha20_Vec32.c
+++ b/src/msvc/Hacl_Chacha20_Vec32.c
@@ -31,106 +31,106 @@ static inline void double_round_32(uint32_t *st)
 {
   st[0U] = st[0U] + st[4U];
   uint32_t std = st[12U] ^ st[0U];
-  st[12U] = std << (uint32_t)16U | std >> (uint32_t)16U;
+  st[12U] = std << 16U | std >> 16U;
   st[8U] = st[8U] + st[12U];
   uint32_t std0 = st[4U] ^ st[8U];
-  st[4U] = std0 << (uint32_t)12U | std0 >> (uint32_t)20U;
+  st[4U] = std0 << 12U | std0 >> 20U;
   st[0U] = st[0U] + st[4U];
   uint32_t std1 = st[12U] ^ st[0U];
-  st[12U] = std1 << (uint32_t)8U | std1 >> (uint32_t)24U;
+  st[12U] = std1 << 8U | std1 >> 24U;
   st[8U] = st[8U] + st[12U];
   uint32_t std2 = st[4U] ^ st[8U];
-  st[4U] = std2 << (uint32_t)7U | std2 >> (uint32_t)25U;
+  st[4U] = std2 << 7U | std2 >> 25U;
   st[1U] = st[1U] + st[5U];
   uint32_t std3 = st[13U] ^ st[1U];
-  st[13U] = std3 << (uint32_t)16U | std3 >> (uint32_t)16U;
+  st[13U] = std3 << 16U | std3 >> 16U;
   st[9U] = st[9U] + st[13U];
   uint32_t std4 = st[5U] ^ st[9U];
-  st[5U] = std4 << (uint32_t)12U | std4 >> (uint32_t)20U;
+  st[5U] = std4 << 12U | std4 >> 20U;
   st[1U] = st[1U] + st[5U];
   uint32_t std5 = st[13U] ^ st[1U];
-  st[13U] = std5 << (uint32_t)8U | std5 >> (uint32_t)24U;
+  st[13U] = std5 << 8U | std5 >> 24U;
   st[9U] = st[9U] + st[13U];
   uint32_t std6 = st[5U] ^ st[9U];
-  st[5U] = std6 << (uint32_t)7U | std6 >> (uint32_t)25U;
+  st[5U] = std6 << 7U | std6 >> 25U;
   st[2U] = st[2U] + st[6U];
   uint32_t std7 = st[14U] ^ st[2U];
-  st[14U] = std7 << (uint32_t)16U | std7 >> (uint32_t)16U;
+  st[14U] = std7 << 16U | std7 >> 16U;
   st[10U] = st[10U] + st[14U];
   uint32_t std8 = st[6U] ^ st[10U];
-  st[6U] = std8 << (uint32_t)12U | std8 >> (uint32_t)20U;
+  st[6U] = std8 << 12U | std8 >> 20U;
   st[2U] = st[2U] + st[6U];
   uint32_t std9 = st[14U] ^ st[2U];
-  st[14U] = std9 << (uint32_t)8U | std9 >> (uint32_t)24U;
+  st[14U] = std9 << 8U | std9 >> 24U;
   st[10U] = st[10U] + st[14U];
   uint32_t std10 = st[6U] ^ st[10U];
-  st[6U] = std10 << (uint32_t)7U | std10 >> (uint32_t)25U;
+  st[6U] = std10 << 7U | std10 >> 25U;
   st[3U] = st[3U] + st[7U];
   uint32_t std11 = st[15U] ^ st[3U];
-  st[15U] = std11 << (uint32_t)16U | std11 >> (uint32_t)16U;
+  st[15U] = std11 << 16U | std11 >> 16U;
   st[11U] = st[11U] + st[15U];
   uint32_t std12 = st[7U] ^ st[11U];
-  st[7U] = std12 << (uint32_t)12U | std12 >> (uint32_t)20U;
+  st[7U] = std12 << 12U | std12 >> 20U;
   st[3U] = st[3U] + st[7U];
   uint32_t std13 = st[15U] ^ st[3U];
-  st[15U] = std13 << (uint32_t)8U | std13 >> (uint32_t)24U;
+  st[15U] = std13 << 8U | std13 >> 24U;
   st[11U] = st[11U] + st[15U];
   uint32_t std14 = st[7U] ^ st[11U];
-  st[7U] = std14 << (uint32_t)7U | std14 >> (uint32_t)25U;
+  st[7U] = std14 << 7U | std14 >> 25U;
   st[0U] = st[0U] + st[5U];
   uint32_t std15 = st[15U] ^ st[0U];
-  st[15U] = std15 << (uint32_t)16U | std15 >> (uint32_t)16U;
+  st[15U] = std15 << 16U | std15 >> 16U;
   st[10U] = st[10U] + st[15U];
   uint32_t std16 = st[5U] ^ st[10U];
-  st[5U] = std16 << (uint32_t)12U | std16 >> (uint32_t)20U;
+  st[5U] = std16 << 12U | std16 >> 20U;
   st[0U] = st[0U] + st[5U];
   uint32_t std17 = st[15U] ^ st[0U];
-  st[15U] = std17 << (uint32_t)8U | std17 >> (uint32_t)24U;
+  st[15U] = std17 << 8U | std17 >> 24U;
   st[10U] = st[10U] + st[15U];
   uint32_t std18 = st[5U] ^ st[10U];
-  st[5U] = std18 << (uint32_t)7U | std18 >> (uint32_t)25U;
+  st[5U] = std18 << 7U | std18 >> 25U;
   st[1U] = st[1U] + st[6U];
   uint32_t std19 = st[12U] ^ st[1U];
-  st[12U] = std19 << (uint32_t)16U | std19 >> (uint32_t)16U;
+  st[12U] = std19 << 16U | std19 >> 16U;
   st[11U] = st[11U] + st[12U];
   uint32_t std20 = st[6U] ^ st[11U];
-  st[6U] = std20 << (uint32_t)12U | std20 >> (uint32_t)20U;
+  st[6U] = std20 << 12U | std20 >> 20U;
   st[1U] = st[1U] + st[6U];
   uint32_t std21 = st[12U] ^ st[1U];
-  st[12U] = std21 << (uint32_t)8U | std21 >> (uint32_t)24U;
+  st[12U] = std21 << 8U | std21 >> 24U;
   st[11U] = st[11U] + st[12U];
   uint32_t std22 = st[6U] ^ st[11U];
-  st[6U] = std22 << (uint32_t)7U | std22 >> (uint32_t)25U;
+  st[6U] = std22 << 7U | std22 >> 25U;
   st[2U] = st[2U] + st[7U];
   uint32_t std23 = st[13U] ^ st[2U];
-  st[13U] = std23 << (uint32_t)16U | std23 >> (uint32_t)16U;
+  st[13U] = std23 << 16U | std23 >> 16U;
   st[8U] = st[8U] + st[13U];
   uint32_t std24 = st[7U] ^ st[8U];
-  st[7U] = std24 << (uint32_t)12U | std24 >> (uint32_t)20U;
+  st[7U] = std24 << 12U | std24 >> 20U;
   st[2U] = st[2U] + st[7U];
   uint32_t std25 = st[13U] ^ st[2U];
-  st[13U] = std25 << (uint32_t)8U | std25 >> (uint32_t)24U;
+  st[13U] = std25 << 8U | std25 >> 24U;
   st[8U] = st[8U] + st[13U];
   uint32_t std26 = st[7U] ^ st[8U];
-  st[7U] = std26 << (uint32_t)7U | std26 >> (uint32_t)25U;
+  st[7U] = std26 << 7U | std26 >> 25U;
   st[3U] = st[3U] + st[4U];
   uint32_t std27 = st[14U] ^ st[3U];
-  st[14U] = std27 << (uint32_t)16U | std27 >> (uint32_t)16U;
+  st[14U] = std27 << 16U | std27 >> 16U;
   st[9U] = st[9U] + st[14U];
   uint32_t std28 = st[4U] ^ st[9U];
-  st[4U] = std28 << (uint32_t)12U | std28 >> (uint32_t)20U;
+  st[4U] = std28 << 12U | std28 >> 20U;
   st[3U] = st[3U] + st[4U];
   uint32_t std29 = st[14U] ^ st[3U];
-  st[14U] = std29 << (uint32_t)8U | std29 >> (uint32_t)24U;
+  st[14U] = std29 << 8U | std29 >> 24U;
   st[9U] = st[9U] + st[14U];
   uint32_t std30 = st[4U] ^ st[9U];
-  st[4U] = std30 << (uint32_t)7U | std30 >> (uint32_t)25U;
+  st[4U] = std30 << 7U | std30 >> 25U;
 }
 
 static inline void chacha20_core_32(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t));
-  uint32_t ctr_u32 = (uint32_t)1U * ctr;
+  memcpy(k, ctx, 16U * sizeof (uint32_t));
+  uint32_t ctr_u32 = 1U * ctr;
   uint32_t cv = ctr_u32;
   k[12U] = k[12U] + cv;
   double_round_32(k);
@@ -144,9 +144,9 @@ static inline void chacha20_core_32(uint32_t *k, uint32_t *ctx, uint32_t ctr)
   double_round_32(k);
   double_round_32(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = k;
     uint32_t x = k[i] + ctx[i];
     os[i] = x;);
@@ -157,41 +157,41 @@ static inline void chacha20_init_32(uint32_t *ctx, uint8_t *k, uint8_t *n, uint3
 {
   uint32_t ctx1[16U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = ctx1;
     uint32_t x = Hacl_Impl_Chacha20_Vec_chacha20_constants[i];
     os[i] = x;);
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)4U;
-    uint8_t *bj = k + i * (uint32_t)4U;
+    0U,
+    8U,
+    1U,
+    uint32_t *os = ctx1 + 4U;
+    uint8_t *bj = k + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   ctx1[12U] = ctr;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
-    uint32_t *os = ctx1 + (uint32_t)13U;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    0U,
+    3U,
+    1U,
+    uint32_t *os = ctx1 + 13U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = ctx;
     uint32_t x = ctx1[i];
     os[i] = x;);
-  uint32_t ctr1 = (uint32_t)0U;
+  uint32_t ctr1 = 0U;
   uint32_t c12 = ctx[12U];
   ctx[12U] = c12 + ctr1;
 }
@@ -208,39 +208,39 @@ Hacl_Chacha20_Vec32_chacha20_encrypt_32(
 {
   uint32_t ctx[16U] = { 0U };
   chacha20_init_32(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = text + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = text + i0 * 64U;
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, i0);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(uu____1 + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(uu____1 + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(uu____0 + i * (uint32_t)4U, y););
+      store32_le(uu____0 + i * 4U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, nb);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(plain + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(plain + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(plain + i * (uint32_t)4U, y););
+      store32_le(plain + i * 4U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -257,39 +257,39 @@ Hacl_Chacha20_Vec32_chacha20_decrypt_32(
 {
   uint32_t ctx[16U] = { 0U };
   chacha20_init_32(ctx, key, n, ctr);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = cipher + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = cipher + i0 * 64U;
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, i0);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(uu____1 + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(uu____1 + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(uu____0 + i * (uint32_t)4U, y););
+      store32_le(uu____0 + i * 4U, y););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k[16U] = { 0U };
     chacha20_core_32(k, ctx, nb);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t u = load32_le(plain + i * (uint32_t)4U);
+      0U,
+      16U,
+      1U,
+      uint32_t u = load32_le(plain + i * 4U);
       uint32_t x = u;
       uint32_t y = x ^ k[i];
-      store32_le(plain + i * (uint32_t)4U, y););
+      store32_le(plain + i * 4U, y););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
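Note (not part of the patch): every encrypt/decrypt routine above uses the same tail handling — process full blocks directly, then copy the remaining rem bytes into a zero-padded block buffer, process that buffer in place, and copy rem bytes back out. A hedged, generic sketch of that pattern follows; BLOCK, xor_block_f and process_with_tail are illustrative placeholders (block size is 64 for Vec32, 256 for Vec128, 512 for Vec256), not the HACL* API.

#include <stdint.h>
#include <string.h>

#define BLOCK 64U

/* Stand-in for the per-block keystream XOR performed in the loops above. */
typedef void (*xor_block_f)(uint8_t *out, const uint8_t *in, uint32_t block_index);

static void process_with_tail(uint8_t *out, const uint8_t *in, uint32_t len, xor_block_f xor_block)
{
  uint32_t nb = len / BLOCK;
  uint32_t rem = len % BLOCK;
  for (uint32_t i = 0U; i < nb; i++)
  {
    xor_block(out + i * BLOCK, in + i * BLOCK, i);
  }
  if (rem > 0U)
  {
    uint8_t tmp[BLOCK] = { 0U };                  /* zero-padded partial block */
    memcpy(tmp, in + nb * BLOCK, rem * sizeof (uint8_t));
    xor_block(tmp, tmp, nb);                      /* in place on the padded copy */
    memcpy(out + nb * BLOCK, tmp, rem * sizeof (uint8_t));
  }
}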
diff --git a/src/msvc/Hacl_Curve25519_51.c b/src/msvc/Hacl_Curve25519_51.c
index 64c855cf..ca561e89 100644
--- a/src/msvc/Hacl_Curve25519_51.c
+++ b/src/msvc/Hacl_Curve25519_51.c
@@ -28,38 +28,38 @@
 #include "internal/Hacl_Krmllib.h"
 #include "internal/Hacl_Bignum25519_51.h"
 
-static const uint8_t g25519[32U] = { (uint8_t)9U };
+static const uint8_t g25519[32U] = { 9U };
 
 static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_uint128 *tmp2)
 {
   uint64_t *nq = p01_tmp1;
-  uint64_t *nq_p1 = p01_tmp1 + (uint32_t)10U;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U;
+  uint64_t *nq_p1 = p01_tmp1 + 10U;
+  uint64_t *tmp1 = p01_tmp1 + 20U;
   uint64_t *x1 = q;
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)5U;
-  uint64_t *z3 = nq_p1 + (uint32_t)5U;
+  uint64_t *z2 = nq + 5U;
+  uint64_t *z3 = nq_p1 + 5U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)5U;
+  uint64_t *b = tmp1 + 5U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)10U;
+  uint64_t *dc = tmp1 + 10U;
   Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
   Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
   uint64_t *x3 = nq_p1;
-  uint64_t *z31 = nq_p1 + (uint32_t)5U;
+  uint64_t *z31 = nq_p1 + 5U;
   uint64_t *d0 = dc;
-  uint64_t *c0 = dc + (uint32_t)5U;
+  uint64_t *c0 = dc + 5U;
   Hacl_Impl_Curve25519_Field51_fadd(c0, x3, z31);
   Hacl_Impl_Curve25519_Field51_fsub(d0, x3, z31);
   Hacl_Impl_Curve25519_Field51_fmul2(dc, dc, ab, tmp2);
   Hacl_Impl_Curve25519_Field51_fadd(x3, d0, c0);
   Hacl_Impl_Curve25519_Field51_fsub(z31, d0, c0);
   uint64_t *a1 = tmp1;
-  uint64_t *b1 = tmp1 + (uint32_t)5U;
-  uint64_t *d = tmp1 + (uint32_t)10U;
-  uint64_t *c = tmp1 + (uint32_t)15U;
+  uint64_t *b1 = tmp1 + 5U;
+  uint64_t *d = tmp1 + 10U;
+  uint64_t *c = tmp1 + 15U;
   uint64_t *ab1 = tmp1;
-  uint64_t *dc1 = tmp1 + (uint32_t)10U;
+  uint64_t *dc1 = tmp1 + 10U;
   Hacl_Impl_Curve25519_Field51_fsqr2(dc1, ab1, tmp2);
   Hacl_Impl_Curve25519_Field51_fsqr2(nq_p1, nq_p1, tmp2);
   a1[0U] = c[0U];
@@ -68,7 +68,7 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_
   a1[3U] = c[3U];
   a1[4U] = c[4U];
   Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
-  Hacl_Impl_Curve25519_Field51_fmul1(b1, c, (uint64_t)121665U);
+  Hacl_Impl_Curve25519_Field51_fmul1(b1, c, 121665ULL);
   Hacl_Impl_Curve25519_Field51_fadd(b1, b1, d);
   Hacl_Impl_Curve25519_Field51_fmul2(nq, dc1, ab1, tmp2);
   Hacl_Impl_Curve25519_Field51_fmul(z3, z3, x1, tmp2);
@@ -77,13 +77,13 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, FStar_UInt128_
 static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tmp2)
 {
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)5U;
+  uint64_t *z2 = nq + 5U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)5U;
-  uint64_t *d = tmp1 + (uint32_t)10U;
-  uint64_t *c = tmp1 + (uint32_t)15U;
+  uint64_t *b = tmp1 + 5U;
+  uint64_t *d = tmp1 + 10U;
+  uint64_t *c = tmp1 + 15U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)10U;
+  uint64_t *dc = tmp1 + 10U;
   Hacl_Impl_Curve25519_Field51_fadd(a, x2, z2);
   Hacl_Impl_Curve25519_Field51_fsub(b, x2, z2);
   Hacl_Impl_Curve25519_Field51_fsqr2(dc, ab, tmp2);
@@ -93,7 +93,7 @@ static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tm
   a[3U] = c[3U];
   a[4U] = c[4U];
   Hacl_Impl_Curve25519_Field51_fsub(c, d, c);
-  Hacl_Impl_Curve25519_Field51_fmul1(b, c, (uint64_t)121665U);
+  Hacl_Impl_Curve25519_Field51_fmul1(b, c, 121665ULL);
   Hacl_Impl_Curve25519_Field51_fadd(b, b, d);
   Hacl_Impl_Curve25519_Field51_fmul2(nq, dc, ab, tmp2);
 }
@@ -101,46 +101,41 @@ static void point_double(uint64_t *nq, uint64_t *tmp1, FStar_UInt128_uint128 *tm
 static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
 {
   FStar_UInt128_uint128 tmp2[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp2[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp2[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   uint64_t p01_tmp1_swap[41U] = { 0U };
   uint64_t *p0 = p01_tmp1_swap;
   uint64_t *p01 = p01_tmp1_swap;
   uint64_t *p03 = p01;
-  uint64_t *p11 = p01 + (uint32_t)10U;
-  memcpy(p11, init, (uint32_t)10U * sizeof (uint64_t));
+  uint64_t *p11 = p01 + 10U;
+  memcpy(p11, init, 10U * sizeof (uint64_t));
   uint64_t *x0 = p03;
-  uint64_t *z0 = p03 + (uint32_t)5U;
-  x0[0U] = (uint64_t)1U;
-  x0[1U] = (uint64_t)0U;
-  x0[2U] = (uint64_t)0U;
-  x0[3U] = (uint64_t)0U;
-  x0[4U] = (uint64_t)0U;
-  z0[0U] = (uint64_t)0U;
-  z0[1U] = (uint64_t)0U;
-  z0[2U] = (uint64_t)0U;
-  z0[3U] = (uint64_t)0U;
-  z0[4U] = (uint64_t)0U;
+  uint64_t *z0 = p03 + 5U;
+  x0[0U] = 1ULL;
+  x0[1U] = 0ULL;
+  x0[2U] = 0ULL;
+  x0[3U] = 0ULL;
+  x0[4U] = 0ULL;
+  z0[0U] = 0ULL;
+  z0[1U] = 0ULL;
+  z0[2U] = 0ULL;
+  z0[3U] = 0ULL;
+  z0[4U] = 0ULL;
   uint64_t *p01_tmp1 = p01_tmp1_swap;
   uint64_t *p01_tmp11 = p01_tmp1_swap;
   uint64_t *nq1 = p01_tmp1_swap;
-  uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)10U;
-  uint64_t *swap = p01_tmp1_swap + (uint32_t)40U;
-  Hacl_Impl_Curve25519_Field51_cswap2((uint64_t)1U, nq1, nq_p11);
+  uint64_t *nq_p11 = p01_tmp1_swap + 10U;
+  uint64_t *swap = p01_tmp1_swap + 40U;
+  Hacl_Impl_Curve25519_Field51_cswap2(1ULL, nq1, nq_p11);
   point_add_and_double(init, p01_tmp11, tmp2);
-  swap[0U] = (uint64_t)1U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i++)
+  swap[0U] = 1ULL;
+  for (uint32_t i = 0U; i < 251U; i++)
   {
     uint64_t *p01_tmp12 = p01_tmp1_swap;
-    uint64_t *swap1 = p01_tmp1_swap + (uint32_t)40U;
+    uint64_t *swap1 = p01_tmp1_swap + 40U;
     uint64_t *nq2 = p01_tmp12;
-    uint64_t *nq_p12 = p01_tmp12 + (uint32_t)10U;
-    uint64_t
-    bit =
-      (uint64_t)(key[((uint32_t)253U - i)
-      / (uint32_t)8U]
-      >> ((uint32_t)253U - i) % (uint32_t)8U
-      & (uint8_t)1U);
+    uint64_t *nq_p12 = p01_tmp12 + 10U;
+    uint64_t bit = (uint64_t)((uint32_t)key[(253U - i) / 8U] >> (253U - i) % 8U & 1U);
     uint64_t sw = swap1[0U] ^ bit;
     Hacl_Impl_Curve25519_Field51_cswap2(sw, nq2, nq_p12);
     point_add_and_double(init, p01_tmp12, tmp2);
@@ -149,11 +144,11 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
   uint64_t sw = swap[0U];
   Hacl_Impl_Curve25519_Field51_cswap2(sw, nq1, nq_p11);
   uint64_t *nq10 = p01_tmp1;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)20U;
+  uint64_t *tmp1 = p01_tmp1 + 20U;
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
-  memcpy(out, p0, (uint32_t)10U * sizeof (uint64_t));
+  memcpy(out, p0, 10U * sizeof (uint64_t));
 }
 
 void
@@ -165,7 +160,7 @@ Hacl_Curve25519_51_fsquare_times(
 )
 {
   Hacl_Impl_Curve25519_Field51_fsqr(o, inp, tmp);
-  for (uint32_t i = (uint32_t)0U; i < n - (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < n - 1U; i++)
   {
     Hacl_Impl_Curve25519_Field51_fsqr(o, o, tmp);
   }
@@ -175,60 +170,56 @@ void Hacl_Curve25519_51_finv(uint64_t *o, uint64_t *i, FStar_UInt128_uint128 *tm
 {
   uint64_t t1[20U] = { 0U };
   uint64_t *a1 = t1;
-  uint64_t *b1 = t1 + (uint32_t)5U;
-  uint64_t *t010 = t1 + (uint32_t)15U;
+  uint64_t *b1 = t1 + 5U;
+  uint64_t *t010 = t1 + 15U;
   FStar_UInt128_uint128 *tmp10 = tmp;
-  Hacl_Curve25519_51_fsquare_times(a1, i, tmp10, (uint32_t)1U);
-  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, (uint32_t)2U);
+  Hacl_Curve25519_51_fsquare_times(a1, i, tmp10, 1U);
+  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, 2U);
   Hacl_Impl_Curve25519_Field51_fmul(b1, t010, i, tmp);
   Hacl_Impl_Curve25519_Field51_fmul(a1, b1, a1, tmp);
-  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, (uint32_t)1U);
+  Hacl_Curve25519_51_fsquare_times(t010, a1, tmp10, 1U);
   Hacl_Impl_Curve25519_Field51_fmul(b1, t010, b1, tmp);
-  Hacl_Curve25519_51_fsquare_times(t010, b1, tmp10, (uint32_t)5U);
+  Hacl_Curve25519_51_fsquare_times(t010, b1, tmp10, 5U);
   Hacl_Impl_Curve25519_Field51_fmul(b1, t010, b1, tmp);
-  uint64_t *b10 = t1 + (uint32_t)5U;
-  uint64_t *c10 = t1 + (uint32_t)10U;
-  uint64_t *t011 = t1 + (uint32_t)15U;
+  uint64_t *b10 = t1 + 5U;
+  uint64_t *c10 = t1 + 10U;
+  uint64_t *t011 = t1 + 15U;
   FStar_UInt128_uint128 *tmp11 = tmp;
-  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, (uint32_t)10U);
+  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, 10U);
   Hacl_Impl_Curve25519_Field51_fmul(c10, t011, b10, tmp);
-  Hacl_Curve25519_51_fsquare_times(t011, c10, tmp11, (uint32_t)20U);
+  Hacl_Curve25519_51_fsquare_times(t011, c10, tmp11, 20U);
   Hacl_Impl_Curve25519_Field51_fmul(t011, t011, c10, tmp);
-  Hacl_Curve25519_51_fsquare_times(t011, t011, tmp11, (uint32_t)10U);
+  Hacl_Curve25519_51_fsquare_times(t011, t011, tmp11, 10U);
   Hacl_Impl_Curve25519_Field51_fmul(b10, t011, b10, tmp);
-  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, (uint32_t)50U);
+  Hacl_Curve25519_51_fsquare_times(t011, b10, tmp11, 50U);
   Hacl_Impl_Curve25519_Field51_fmul(c10, t011, b10, tmp);
-  uint64_t *b11 = t1 + (uint32_t)5U;
-  uint64_t *c1 = t1 + (uint32_t)10U;
-  uint64_t *t01 = t1 + (uint32_t)15U;
+  uint64_t *b11 = t1 + 5U;
+  uint64_t *c1 = t1 + 10U;
+  uint64_t *t01 = t1 + 15U;
   FStar_UInt128_uint128 *tmp1 = tmp;
-  Hacl_Curve25519_51_fsquare_times(t01, c1, tmp1, (uint32_t)100U);
+  Hacl_Curve25519_51_fsquare_times(t01, c1, tmp1, 100U);
   Hacl_Impl_Curve25519_Field51_fmul(t01, t01, c1, tmp);
-  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, (uint32_t)50U);
+  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, 50U);
   Hacl_Impl_Curve25519_Field51_fmul(t01, t01, b11, tmp);
-  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, (uint32_t)5U);
+  Hacl_Curve25519_51_fsquare_times(t01, t01, tmp1, 5U);
   uint64_t *a = t1;
-  uint64_t *t0 = t1 + (uint32_t)15U;
+  uint64_t *t0 = t1 + 15U;
   Hacl_Impl_Curve25519_Field51_fmul(o, t0, a, tmp);
 }
 
 static void encode_point(uint8_t *o, uint64_t *i)
 {
   uint64_t *x = i;
-  uint64_t *z = i + (uint32_t)5U;
+  uint64_t *z = i + 5U;
   uint64_t tmp[5U] = { 0U };
   uint64_t u64s[4U] = { 0U };
   FStar_UInt128_uint128 tmp_w[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp_w[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp_w[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_finv(tmp, z, tmp_w);
   Hacl_Impl_Curve25519_Field51_fmul(tmp, tmp, x, tmp_w);
   Hacl_Impl_Curve25519_Field51_store_felem(u64s, tmp);
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(o + i0 * (uint32_t)8U, u64s[i0]););
+  KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, store64_le(o + i0 * 8U, u64s[i0]););
 }
 
 /**
@@ -243,32 +234,32 @@ void Hacl_Curve25519_51_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
   uint64_t init[10U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = tmp;
-    uint8_t *bj = pub + i * (uint32_t)8U;
+    uint8_t *bj = pub + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t tmp3 = tmp[3U];
-  tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU;
+  tmp[3U] = tmp3 & 0x7fffffffffffffffULL;
   uint64_t *x = init;
-  uint64_t *z = init + (uint32_t)5U;
-  z[0U] = (uint64_t)1U;
-  z[1U] = (uint64_t)0U;
-  z[2U] = (uint64_t)0U;
-  z[3U] = (uint64_t)0U;
-  z[4U] = (uint64_t)0U;
-  uint64_t f0l = tmp[0U] & (uint64_t)0x7ffffffffffffU;
-  uint64_t f0h = tmp[0U] >> (uint32_t)51U;
-  uint64_t f1l = (tmp[1U] & (uint64_t)0x3fffffffffU) << (uint32_t)13U;
-  uint64_t f1h = tmp[1U] >> (uint32_t)38U;
-  uint64_t f2l = (tmp[2U] & (uint64_t)0x1ffffffU) << (uint32_t)26U;
-  uint64_t f2h = tmp[2U] >> (uint32_t)25U;
-  uint64_t f3l = (tmp[3U] & (uint64_t)0xfffU) << (uint32_t)39U;
-  uint64_t f3h = tmp[3U] >> (uint32_t)12U;
+  uint64_t *z = init + 5U;
+  z[0U] = 1ULL;
+  z[1U] = 0ULL;
+  z[2U] = 0ULL;
+  z[3U] = 0ULL;
+  z[4U] = 0ULL;
+  uint64_t f0l = tmp[0U] & 0x7ffffffffffffULL;
+  uint64_t f0h = tmp[0U] >> 51U;
+  uint64_t f1l = (tmp[1U] & 0x3fffffffffULL) << 13U;
+  uint64_t f1h = tmp[1U] >> 38U;
+  uint64_t f2l = (tmp[2U] & 0x1ffffffULL) << 26U;
+  uint64_t f2h = tmp[2U] >> 25U;
+  uint64_t f3l = (tmp[3U] & 0xfffULL) << 39U;
+  uint64_t f3h = tmp[3U] >> 12U;
   x[0U] = f0l;
   x[1U] = f0h | f1l;
   x[2U] = f1h | f2l;
@@ -289,7 +280,7 @@ This computes a scalar multiplication of the secret/private key with the curve's
 void Hacl_Curve25519_51_secret_to_public(uint8_t *pub, uint8_t *priv)
 {
   uint8_t basepoint[32U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = basepoint;
     uint8_t x = g25519[i];
@@ -309,14 +300,14 @@ bool Hacl_Curve25519_51_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
 {
   uint8_t zeros[32U] = { 0U };
   Hacl_Curve25519_51_scalarmult(out, priv, pub);
-  uint8_t res = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint8_t res = 255U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint8_t z = res;
-  bool r = z == (uint8_t)255U;
+  bool r = z == 255U;
   return !r;
 }
 
diff --git a/src/msvc/Hacl_Curve25519_64.c b/src/msvc/Hacl_Curve25519_64.c
index fb0974fe..edcab306 100644
--- a/src/msvc/Hacl_Curve25519_64.c
+++ b/src/msvc/Hacl_Curve25519_64.c
@@ -35,7 +35,7 @@ static inline void add_scalar0(uint64_t *out, uint64_t *f1, uint64_t f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   add_scalar(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(add_scalar_e(out, f1, f2));
+  add_scalar_e(out, f1, f2);
   #endif
 }
 
@@ -44,7 +44,7 @@ static inline void fadd0(uint64_t *out, uint64_t *f1, uint64_t *f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fadd(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(fadd_e(out, f1, f2));
+  fadd_e(out, f1, f2);
   #endif
 }
 
@@ -53,7 +53,7 @@ static inline void fsub0(uint64_t *out, uint64_t *f1, uint64_t *f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fsub(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(fsub_e(out, f1, f2));
+  fsub_e(out, f1, f2);
   #endif
 }
 
@@ -62,7 +62,7 @@ static inline void fmul0(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *tm
   #if HACL_CAN_COMPILE_INLINE_ASM
   fmul(out, f1, f2, tmp);
   #else
-  KRML_HOST_IGNORE(fmul_e(tmp, f1, out, f2));
+  fmul_e(tmp, f1, out, f2);
   #endif
 }
 
@@ -71,7 +71,7 @@ static inline void fmul20(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *t
   #if HACL_CAN_COMPILE_INLINE_ASM
   fmul2(out, f1, f2, tmp);
   #else
-  KRML_HOST_IGNORE(fmul2_e(tmp, f1, out, f2));
+  fmul2_e(tmp, f1, out, f2);
   #endif
 }
 
@@ -80,7 +80,7 @@ static inline void fmul_scalar0(uint64_t *out, uint64_t *f1, uint64_t f2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fmul_scalar(out, f1, f2);
   #else
-  KRML_HOST_IGNORE(fmul_scalar_e(out, f1, f2));
+  fmul_scalar_e(out, f1, f2);
   #endif
 }
 
@@ -89,7 +89,7 @@ static inline void fsqr0(uint64_t *out, uint64_t *f1, uint64_t *tmp)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fsqr(out, f1, tmp);
   #else
-  KRML_HOST_IGNORE(fsqr_e(tmp, f1, out));
+  fsqr_e(tmp, f1, out);
   #endif
 }
 
@@ -98,7 +98,7 @@ static inline void fsqr20(uint64_t *out, uint64_t *f, uint64_t *tmp)
   #if HACL_CAN_COMPILE_INLINE_ASM
   fsqr2(out, f, tmp);
   #else
-  KRML_HOST_IGNORE(fsqr2_e(tmp, f, out));
+  fsqr2_e(tmp, f, out);
   #endif
 }
 
@@ -107,42 +107,42 @@ static inline void cswap20(uint64_t bit, uint64_t *p1, uint64_t *p2)
   #if HACL_CAN_COMPILE_INLINE_ASM
   cswap2(bit, p1, p2);
   #else
-  KRML_HOST_IGNORE(cswap2_e(bit, p1, p2));
+  cswap2_e(bit, p1, p2);
   #endif
 }
 
-static const uint8_t g25519[32U] = { (uint8_t)9U };
+static const uint8_t g25519[32U] = { 9U };
 
 static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2)
 {
   uint64_t *nq = p01_tmp1;
-  uint64_t *nq_p1 = p01_tmp1 + (uint32_t)8U;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U;
+  uint64_t *nq_p1 = p01_tmp1 + 8U;
+  uint64_t *tmp1 = p01_tmp1 + 16U;
   uint64_t *x1 = q;
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)4U;
-  uint64_t *z3 = nq_p1 + (uint32_t)4U;
+  uint64_t *z2 = nq + 4U;
+  uint64_t *z3 = nq_p1 + 4U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)4U;
+  uint64_t *b = tmp1 + 4U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)8U;
+  uint64_t *dc = tmp1 + 8U;
   fadd0(a, x2, z2);
   fsub0(b, x2, z2);
   uint64_t *x3 = nq_p1;
-  uint64_t *z31 = nq_p1 + (uint32_t)4U;
+  uint64_t *z31 = nq_p1 + 4U;
   uint64_t *d0 = dc;
-  uint64_t *c0 = dc + (uint32_t)4U;
+  uint64_t *c0 = dc + 4U;
   fadd0(c0, x3, z31);
   fsub0(d0, x3, z31);
   fmul20(dc, dc, ab, tmp2);
   fadd0(x3, d0, c0);
   fsub0(z31, d0, c0);
   uint64_t *a1 = tmp1;
-  uint64_t *b1 = tmp1 + (uint32_t)4U;
-  uint64_t *d = tmp1 + (uint32_t)8U;
-  uint64_t *c = tmp1 + (uint32_t)12U;
+  uint64_t *b1 = tmp1 + 4U;
+  uint64_t *d = tmp1 + 8U;
+  uint64_t *c = tmp1 + 12U;
   uint64_t *ab1 = tmp1;
-  uint64_t *dc1 = tmp1 + (uint32_t)8U;
+  uint64_t *dc1 = tmp1 + 8U;
   fsqr20(dc1, ab1, tmp2);
   fsqr20(nq_p1, nq_p1, tmp2);
   a1[0U] = c[0U];
@@ -150,7 +150,7 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2
   a1[2U] = c[2U];
   a1[3U] = c[3U];
   fsub0(c, d, c);
-  fmul_scalar0(b1, c, (uint64_t)121665U);
+  fmul_scalar0(b1, c, 121665ULL);
   fadd0(b1, b1, d);
   fmul20(nq, dc1, ab1, tmp2);
   fmul0(z3, z3, x1, tmp2);
@@ -159,13 +159,13 @@ static void point_add_and_double(uint64_t *q, uint64_t *p01_tmp1, uint64_t *tmp2
 static void point_double(uint64_t *nq, uint64_t *tmp1, uint64_t *tmp2)
 {
   uint64_t *x2 = nq;
-  uint64_t *z2 = nq + (uint32_t)4U;
+  uint64_t *z2 = nq + 4U;
   uint64_t *a = tmp1;
-  uint64_t *b = tmp1 + (uint32_t)4U;
-  uint64_t *d = tmp1 + (uint32_t)8U;
-  uint64_t *c = tmp1 + (uint32_t)12U;
+  uint64_t *b = tmp1 + 4U;
+  uint64_t *d = tmp1 + 8U;
+  uint64_t *c = tmp1 + 12U;
   uint64_t *ab = tmp1;
-  uint64_t *dc = tmp1 + (uint32_t)8U;
+  uint64_t *dc = tmp1 + 8U;
   fadd0(a, x2, z2);
   fsub0(b, x2, z2);
   fsqr20(dc, ab, tmp2);
@@ -174,7 +174,7 @@ static void point_double(uint64_t *nq, uint64_t *tmp1, uint64_t *tmp2)
   a[2U] = c[2U];
   a[3U] = c[3U];
   fsub0(c, d, c);
-  fmul_scalar0(b, c, (uint64_t)121665U);
+  fmul_scalar0(b, c, 121665ULL);
   fadd0(b, b, d);
   fmul20(nq, dc, ab, tmp2);
 }
@@ -186,38 +186,33 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
   uint64_t *p0 = p01_tmp1_swap;
   uint64_t *p01 = p01_tmp1_swap;
   uint64_t *p03 = p01;
-  uint64_t *p11 = p01 + (uint32_t)8U;
-  memcpy(p11, init, (uint32_t)8U * sizeof (uint64_t));
+  uint64_t *p11 = p01 + 8U;
+  memcpy(p11, init, 8U * sizeof (uint64_t));
   uint64_t *x0 = p03;
-  uint64_t *z0 = p03 + (uint32_t)4U;
-  x0[0U] = (uint64_t)1U;
-  x0[1U] = (uint64_t)0U;
-  x0[2U] = (uint64_t)0U;
-  x0[3U] = (uint64_t)0U;
-  z0[0U] = (uint64_t)0U;
-  z0[1U] = (uint64_t)0U;
-  z0[2U] = (uint64_t)0U;
-  z0[3U] = (uint64_t)0U;
+  uint64_t *z0 = p03 + 4U;
+  x0[0U] = 1ULL;
+  x0[1U] = 0ULL;
+  x0[2U] = 0ULL;
+  x0[3U] = 0ULL;
+  z0[0U] = 0ULL;
+  z0[1U] = 0ULL;
+  z0[2U] = 0ULL;
+  z0[3U] = 0ULL;
   uint64_t *p01_tmp1 = p01_tmp1_swap;
   uint64_t *p01_tmp11 = p01_tmp1_swap;
   uint64_t *nq1 = p01_tmp1_swap;
-  uint64_t *nq_p11 = p01_tmp1_swap + (uint32_t)8U;
-  uint64_t *swap = p01_tmp1_swap + (uint32_t)32U;
-  cswap20((uint64_t)1U, nq1, nq_p11);
+  uint64_t *nq_p11 = p01_tmp1_swap + 8U;
+  uint64_t *swap = p01_tmp1_swap + 32U;
+  cswap20(1ULL, nq1, nq_p11);
   point_add_and_double(init, p01_tmp11, tmp2);
-  swap[0U] = (uint64_t)1U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)251U; i++)
+  swap[0U] = 1ULL;
+  for (uint32_t i = 0U; i < 251U; i++)
   {
     uint64_t *p01_tmp12 = p01_tmp1_swap;
-    uint64_t *swap1 = p01_tmp1_swap + (uint32_t)32U;
+    uint64_t *swap1 = p01_tmp1_swap + 32U;
     uint64_t *nq2 = p01_tmp12;
-    uint64_t *nq_p12 = p01_tmp12 + (uint32_t)8U;
-    uint64_t
-    bit =
-      (uint64_t)(key[((uint32_t)253U - i)
-      / (uint32_t)8U]
-      >> ((uint32_t)253U - i) % (uint32_t)8U
-      & (uint8_t)1U);
+    uint64_t *nq_p12 = p01_tmp12 + 8U;
+    uint64_t bit = (uint64_t)((uint32_t)key[(253U - i) / 8U] >> (253U - i) % 8U & 1U);
     uint64_t sw = swap1[0U] ^ bit;
     cswap20(sw, nq2, nq_p12);
     point_add_and_double(init, p01_tmp12, tmp2);
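Note: in the ladder loop above, bit k of the clamped scalar (k = 253 - i) is read as key[k / 8] >> (k % 8) & 1, and sw = swap1[0U] ^ bit decides whether the two working points trade places before the next step, so the sequence of field operations never depends on secret data. The swap itself (cswap2 / cswap2_e, defined elsewhere) is constant time; a minimal sketch of the usual masking idiom follows, with a hypothetical helper name and not claiming to be the actual body of cswap2:

#include <stdint.h>

/* Constant-time swap of two 4-limb values: mask is all-ones when bit == 1 and
   all-zeros when bit == 0, so no branch or access pattern depends on the bit. */
static inline void cswap4_sketch(uint64_t bit, uint64_t *a, uint64_t *b)
{
  uint64_t mask = 0ULL - bit;
  for (uint32_t i = 0U; i < 4U; i++)
  {
    uint64_t dummy = mask & (a[i] ^ b[i]);
    a[i] = a[i] ^ dummy;
    b[i] = b[i] ^ dummy;
  }
}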
@@ -226,17 +221,17 @@ static void montgomery_ladder(uint64_t *out, uint8_t *key, uint64_t *init)
   uint64_t sw = swap[0U];
   cswap20(sw, nq1, nq_p11);
   uint64_t *nq10 = p01_tmp1;
-  uint64_t *tmp1 = p01_tmp1 + (uint32_t)16U;
+  uint64_t *tmp1 = p01_tmp1 + 16U;
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
   point_double(nq10, tmp1, tmp2);
-  memcpy(out, p0, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(out, p0, 8U * sizeof (uint64_t));
 }
 
 static void fsquare_times(uint64_t *o, uint64_t *inp, uint64_t *tmp, uint32_t n)
 {
   fsqr0(o, inp, tmp);
-  for (uint32_t i = (uint32_t)0U; i < n - (uint32_t)1U; i++)
+  for (uint32_t i = 0U; i < n - 1U; i++)
   {
     fsqr0(o, o, tmp);
   }
@@ -246,66 +241,66 @@ static void finv(uint64_t *o, uint64_t *i, uint64_t *tmp)
 {
   uint64_t t1[16U] = { 0U };
   uint64_t *a1 = t1;
-  uint64_t *b1 = t1 + (uint32_t)4U;
-  uint64_t *t010 = t1 + (uint32_t)12U;
+  uint64_t *b1 = t1 + 4U;
+  uint64_t *t010 = t1 + 12U;
   uint64_t *tmp10 = tmp;
-  fsquare_times(a1, i, tmp10, (uint32_t)1U);
-  fsquare_times(t010, a1, tmp10, (uint32_t)2U);
+  fsquare_times(a1, i, tmp10, 1U);
+  fsquare_times(t010, a1, tmp10, 2U);
   fmul0(b1, t010, i, tmp);
   fmul0(a1, b1, a1, tmp);
-  fsquare_times(t010, a1, tmp10, (uint32_t)1U);
+  fsquare_times(t010, a1, tmp10, 1U);
   fmul0(b1, t010, b1, tmp);
-  fsquare_times(t010, b1, tmp10, (uint32_t)5U);
+  fsquare_times(t010, b1, tmp10, 5U);
   fmul0(b1, t010, b1, tmp);
-  uint64_t *b10 = t1 + (uint32_t)4U;
-  uint64_t *c10 = t1 + (uint32_t)8U;
-  uint64_t *t011 = t1 + (uint32_t)12U;
+  uint64_t *b10 = t1 + 4U;
+  uint64_t *c10 = t1 + 8U;
+  uint64_t *t011 = t1 + 12U;
   uint64_t *tmp11 = tmp;
-  fsquare_times(t011, b10, tmp11, (uint32_t)10U);
+  fsquare_times(t011, b10, tmp11, 10U);
   fmul0(c10, t011, b10, tmp);
-  fsquare_times(t011, c10, tmp11, (uint32_t)20U);
+  fsquare_times(t011, c10, tmp11, 20U);
   fmul0(t011, t011, c10, tmp);
-  fsquare_times(t011, t011, tmp11, (uint32_t)10U);
+  fsquare_times(t011, t011, tmp11, 10U);
   fmul0(b10, t011, b10, tmp);
-  fsquare_times(t011, b10, tmp11, (uint32_t)50U);
+  fsquare_times(t011, b10, tmp11, 50U);
   fmul0(c10, t011, b10, tmp);
-  uint64_t *b11 = t1 + (uint32_t)4U;
-  uint64_t *c1 = t1 + (uint32_t)8U;
-  uint64_t *t01 = t1 + (uint32_t)12U;
+  uint64_t *b11 = t1 + 4U;
+  uint64_t *c1 = t1 + 8U;
+  uint64_t *t01 = t1 + 12U;
   uint64_t *tmp1 = tmp;
-  fsquare_times(t01, c1, tmp1, (uint32_t)100U);
+  fsquare_times(t01, c1, tmp1, 100U);
   fmul0(t01, t01, c1, tmp);
-  fsquare_times(t01, t01, tmp1, (uint32_t)50U);
+  fsquare_times(t01, t01, tmp1, 50U);
   fmul0(t01, t01, b11, tmp);
-  fsquare_times(t01, t01, tmp1, (uint32_t)5U);
+  fsquare_times(t01, t01, tmp1, 5U);
   uint64_t *a = t1;
-  uint64_t *t0 = t1 + (uint32_t)12U;
+  uint64_t *t0 = t1 + 12U;
   fmul0(o, t0, a, tmp);
 }
 
 static void store_felem(uint64_t *b, uint64_t *f)
 {
   uint64_t f30 = f[3U];
-  uint64_t top_bit0 = f30 >> (uint32_t)63U;
-  f[3U] = f30 & (uint64_t)0x7fffffffffffffffU;
-  add_scalar0(f, f, (uint64_t)19U * top_bit0);
+  uint64_t top_bit0 = f30 >> 63U;
+  f[3U] = f30 & 0x7fffffffffffffffULL;
+  add_scalar0(f, f, 19ULL * top_bit0);
   uint64_t f31 = f[3U];
-  uint64_t top_bit = f31 >> (uint32_t)63U;
-  f[3U] = f31 & (uint64_t)0x7fffffffffffffffU;
-  add_scalar0(f, f, (uint64_t)19U * top_bit);
+  uint64_t top_bit = f31 >> 63U;
+  f[3U] = f31 & 0x7fffffffffffffffULL;
+  add_scalar0(f, f, 19ULL * top_bit);
   uint64_t f0 = f[0U];
   uint64_t f1 = f[1U];
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
-  uint64_t m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0xffffffffffffffedU);
-  uint64_t m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0xffffffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0xffffffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7fffffffffffffffU);
+  uint64_t m0 = FStar_UInt64_gte_mask(f0, 0xffffffffffffffedULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(f1, 0xffffffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(f2, 0xffffffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(f3, 0x7fffffffffffffffULL);
   uint64_t mask = ((m0 & m1) & m2) & m3;
-  uint64_t f0_ = f0 - (mask & (uint64_t)0xffffffffffffffedU);
-  uint64_t f1_ = f1 - (mask & (uint64_t)0xffffffffffffffffU);
-  uint64_t f2_ = f2 - (mask & (uint64_t)0xffffffffffffffffU);
-  uint64_t f3_ = f3 - (mask & (uint64_t)0x7fffffffffffffffU);
+  uint64_t f0_ = f0 - (mask & 0xffffffffffffffedULL);
+  uint64_t f1_ = f1 - (mask & 0xffffffffffffffffULL);
+  uint64_t f2_ = f2 - (mask & 0xffffffffffffffffULL);
+  uint64_t f3_ = f3 - (mask & 0x7fffffffffffffffULL);
   uint64_t o0 = f0_;
   uint64_t o1 = f1_;
   uint64_t o2 = f2_;
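Note: store_felem above first folds the top bit back in twice (2^255 = 19 mod p for p = 2^255 - 19, hence the 19 * top_bit additions after clearing bit 255), then makes the 4-limb result canonical without branching: mask is all-ones exactly when the value is at least p, so subtracting mask & p subtracts p only in that case, in constant time.

/* Canonical reduction, as the masked code above computes it:
 *   mask = (f >= p) ? all-ones : 0      built branch-free from gte_mask / eq_mask
 *   f    = f - (mask & p)               subtracts p exactly when f >= p
 * with p = 2^255 - 19 given as little-endian 64-bit limbs
 *   (0xffffffffffffffed, 0xffffffffffffffff, 0xffffffffffffffff, 0x7fffffffffffffff).
 */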
@@ -319,18 +314,14 @@ static void store_felem(uint64_t *b, uint64_t *f)
 static void encode_point(uint8_t *o, uint64_t *i)
 {
   uint64_t *x = i;
-  uint64_t *z = i + (uint32_t)4U;
+  uint64_t *z = i + 4U;
   uint64_t tmp[4U] = { 0U };
   uint64_t u64s[4U] = { 0U };
   uint64_t tmp_w[16U] = { 0U };
   finv(tmp, z, tmp_w);
   fmul0(tmp, tmp, x, tmp_w);
   store_felem(u64s, tmp);
-  KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(o + i0 * (uint32_t)8U, u64s[i0]););
+  KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, store64_le(o + i0 * 8U, u64s[i0]););
 }
 
 /**
@@ -345,23 +336,23 @@ void Hacl_Curve25519_64_scalarmult(uint8_t *out, uint8_t *priv, uint8_t *pub)
   uint64_t init[8U] = { 0U };
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = tmp;
-    uint8_t *bj = pub + i * (uint32_t)8U;
+    uint8_t *bj = pub + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t tmp3 = tmp[3U];
-  tmp[3U] = tmp3 & (uint64_t)0x7fffffffffffffffU;
+  tmp[3U] = tmp3 & 0x7fffffffffffffffULL;
   uint64_t *x = init;
-  uint64_t *z = init + (uint32_t)4U;
-  z[0U] = (uint64_t)1U;
-  z[1U] = (uint64_t)0U;
-  z[2U] = (uint64_t)0U;
-  z[3U] = (uint64_t)0U;
+  uint64_t *z = init + 4U;
+  z[0U] = 1ULL;
+  z[1U] = 0ULL;
+  z[2U] = 0ULL;
+  z[3U] = 0ULL;
   x[0U] = tmp[0U];
   x[1U] = tmp[1U];
   x[2U] = tmp[2U];
@@ -381,7 +372,7 @@ This computes a scalar multiplication of the secret/private key with the curve's
 void Hacl_Curve25519_64_secret_to_public(uint8_t *pub, uint8_t *priv)
 {
   uint8_t basepoint[32U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = basepoint;
     uint8_t x = g25519[i];
@@ -401,14 +392,14 @@ bool Hacl_Curve25519_64_ecdh(uint8_t *out, uint8_t *priv, uint8_t *pub)
 {
   uint8_t zeros[32U] = { 0U };
   Hacl_Curve25519_64_scalarmult(out, priv, pub);
-  uint8_t res = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  uint8_t res = 255U;
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t uu____0 = FStar_UInt8_eq_mask(out[i], zeros[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint8_t z = res;
-  bool r = z == (uint8_t)255U;
+  bool r = z == 255U;
   return !r;
 }
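Note: a minimal usage sketch for the three public entry points of this file, assuming the matching Hacl_Curve25519_64.h header and hypothetical variable names; every buffer is 32 bytes (4 x 64-bit words serialized little-endian), as the code above requires.

#include <stdbool.h>
#include <stdint.h>
#include "Hacl_Curve25519_64.h"

/* Sketch: derive a public key from a 32-byte private key, then compute a
   shared secret with a peer's 32-byte public key. */
void x25519_example(uint8_t priv[32U], uint8_t peer_pub[32U])
{
  uint8_t my_pub[32U] = { 0U };
  uint8_t shared[32U] = { 0U };
  Hacl_Curve25519_64_secret_to_public(my_pub, priv);
  /* false exactly when the computed secret is all zeroes (e.g. a low-order
     peer point), which is what the loop over zeros[] above checks. */
  bool ok = Hacl_Curve25519_64_ecdh(shared, priv, peer_pub);
  (void)ok;
}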
 
diff --git a/src/msvc/Hacl_EC_Ed25519.c b/src/msvc/Hacl_EC_Ed25519.c
index 46f2837b..6ab24a33 100644
--- a/src/msvc/Hacl_EC_Ed25519.c
+++ b/src/msvc/Hacl_EC_Ed25519.c
@@ -43,11 +43,11 @@ Write the additive identity in `f`.
 */
 void Hacl_EC_Ed25519_mk_felem_zero(uint64_t *b)
 {
-  b[0U] = (uint64_t)0U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 0ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
 }
 
 /**
@@ -57,11 +57,11 @@ Write the multiplicative identity in `f`.
 */
 void Hacl_EC_Ed25519_mk_felem_one(uint64_t *b)
 {
-  b[0U] = (uint64_t)1U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 1ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
 }
 
 /**
@@ -106,8 +106,8 @@ Write `a * b mod p` in `out`.
 void Hacl_EC_Ed25519_felem_mul(uint64_t *a, uint64_t *b, uint64_t *out)
 {
   FStar_UInt128_uint128 tmp[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fmul(out, a, b, tmp);
 }
 
@@ -123,8 +123,8 @@ Write `a * a mod p` in `out`.
 void Hacl_EC_Ed25519_felem_sqr(uint64_t *a, uint64_t *out)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fsqr(out, a, tmp);
 }
 
@@ -205,29 +205,29 @@ Write the base point (generator) in `p`.
 void Hacl_EC_Ed25519_mk_base_point(uint64_t *p)
 {
   uint64_t *gx = p;
-  uint64_t *gy = p + (uint32_t)5U;
-  uint64_t *gz = p + (uint32_t)10U;
-  uint64_t *gt = p + (uint32_t)15U;
-  gx[0U] = (uint64_t)0x00062d608f25d51aU;
-  gx[1U] = (uint64_t)0x000412a4b4f6592aU;
-  gx[2U] = (uint64_t)0x00075b7171a4b31dU;
-  gx[3U] = (uint64_t)0x0001ff60527118feU;
-  gx[4U] = (uint64_t)0x000216936d3cd6e5U;
-  gy[0U] = (uint64_t)0x0006666666666658U;
-  gy[1U] = (uint64_t)0x0004ccccccccccccU;
-  gy[2U] = (uint64_t)0x0001999999999999U;
-  gy[3U] = (uint64_t)0x0003333333333333U;
-  gy[4U] = (uint64_t)0x0006666666666666U;
-  gz[0U] = (uint64_t)1U;
-  gz[1U] = (uint64_t)0U;
-  gz[2U] = (uint64_t)0U;
-  gz[3U] = (uint64_t)0U;
-  gz[4U] = (uint64_t)0U;
-  gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
-  gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
-  gt[2U] = (uint64_t)0x0002af8df483c27eU;
-  gt[3U] = (uint64_t)0x000332b375274732U;
-  gt[4U] = (uint64_t)0x00067875f0fd78b7U;
+  uint64_t *gy = p + 5U;
+  uint64_t *gz = p + 10U;
+  uint64_t *gt = p + 15U;
+  gx[0U] = 0x00062d608f25d51aULL;
+  gx[1U] = 0x000412a4b4f6592aULL;
+  gx[2U] = 0x00075b7171a4b31dULL;
+  gx[3U] = 0x0001ff60527118feULL;
+  gx[4U] = 0x000216936d3cd6e5ULL;
+  gy[0U] = 0x0006666666666658ULL;
+  gy[1U] = 0x0004ccccccccccccULL;
+  gy[2U] = 0x0001999999999999ULL;
+  gy[3U] = 0x0003333333333333ULL;
+  gy[4U] = 0x0006666666666666ULL;
+  gz[0U] = 1ULL;
+  gz[1U] = 0ULL;
+  gz[2U] = 0ULL;
+  gz[3U] = 0ULL;
+  gz[4U] = 0ULL;
+  gt[0U] = 0x00068ab3a5b7dda3ULL;
+  gt[1U] = 0x00000eea2a5eadbbULL;
+  gt[2U] = 0x0002af8df483c27eULL;
+  gt[3U] = 0x000332b375274732ULL;
+  gt[4U] = 0x00067875f0fd78b7ULL;
 }
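Note: the Hacl_EC_Ed25519 helpers in this file operate on field elements of five 64-bit limbs (radix 2^51, modulo 2^255 - 19) and, in mk_base_point, on an extended point of 20 limbs laid out as x | y | z | t with five limbs each. A minimal usage sketch, assuming the matching Hacl_EC_Ed25519.h header and hypothetical variable names:

#include <stdint.h>
#include "Hacl_EC_Ed25519.h"

void ed25519_felem_example(void)
{
  uint64_t a[5U] = { 0U };
  uint64_t b[5U] = { 0U };
  uint64_t out[5U] = { 0U };
  uint64_t g[20U] = { 0U };               /* extended point: x, y, z, t */
  Hacl_EC_Ed25519_mk_felem_one(a);        /* a = 1 */
  Hacl_EC_Ed25519_mk_felem_zero(b);       /* b = 0 */
  Hacl_EC_Ed25519_felem_mul(a, b, out);   /* out = a * b mod p */
  Hacl_EC_Ed25519_felem_sqr(a, out);      /* out = a * a mod p */
  Hacl_EC_Ed25519_mk_base_point(g);       /* g = the Ed25519 base point */
}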
 
 /**
diff --git a/src/msvc/Hacl_EC_K256.c b/src/msvc/Hacl_EC_K256.c
index e48edb5b..581c223b 100644
--- a/src/msvc/Hacl_EC_K256.c
+++ b/src/msvc/Hacl_EC_K256.c
@@ -43,7 +43,7 @@ Write the additive identity in `f`.
 */
 void Hacl_EC_K256_mk_felem_zero(uint64_t *f)
 {
-  memset(f, 0U, (uint32_t)5U * sizeof (uint64_t));
+  memset(f, 0U, 5U * sizeof (uint64_t));
 }
 
 /**
@@ -53,8 +53,8 @@ Write the multiplicative identity in `f`.
 */
 void Hacl_EC_K256_mk_felem_one(uint64_t *f)
 {
-  memset(f, 0U, (uint32_t)5U * sizeof (uint64_t));
-  f[0U] = (uint64_t)1U;
+  memset(f, 0U, 5U * sizeof (uint64_t));
+  f[0U] = 1ULL;
 }
 
 /**
@@ -83,7 +83,7 @@ Write `a - b mod p` in `out`.
 */
 void Hacl_EC_K256_felem_sub(uint64_t *a, uint64_t *b, uint64_t *out)
 {
-  Hacl_K256_Field_fsub(out, a, b, (uint64_t)2U);
+  Hacl_K256_Field_fsub(out, a, b, 2ULL);
   Hacl_K256_Field_fnormalize_weak(out, out);
 }
 
@@ -189,20 +189,20 @@ Write the base point (generator) in `p`.
 void Hacl_EC_K256_mk_base_point(uint64_t *p)
 {
   uint64_t *gx = p;
-  uint64_t *gy = p + (uint32_t)5U;
-  uint64_t *gz = p + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = p + 5U;
+  uint64_t *gz = p + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
 }
 
 /**
@@ -264,11 +264,11 @@ void Hacl_EC_K256_point_mul(uint8_t *scalar, uint64_t *p, uint64_t *out)
 {
   uint64_t scalar_q[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = scalar_q;
-    uint64_t u = load64_be(scalar + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(scalar + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
   Hacl_Impl_K256_PointMul_point_mul(out, scalar_q, p);
@@ -307,20 +307,20 @@ void Hacl_EC_K256_point_load(uint8_t *b, uint64_t *out)
 {
   uint64_t p_aff[10U] = { 0U };
   uint64_t *px = p_aff;
-  uint64_t *py = p_aff + (uint32_t)5U;
+  uint64_t *py = p_aff + 5U;
   uint8_t *pxb = b;
-  uint8_t *pyb = b + (uint32_t)32U;
+  uint8_t *pyb = b + 32U;
   Hacl_K256_Field_load_felem(px, pxb);
   Hacl_K256_Field_load_felem(py, pyb);
   uint64_t *x = p_aff;
-  uint64_t *y = p_aff + (uint32_t)5U;
+  uint64_t *y = p_aff + 5U;
   uint64_t *x1 = out;
-  uint64_t *y1 = out + (uint32_t)5U;
-  uint64_t *z1 = out + (uint32_t)10U;
-  memcpy(x1, x, (uint32_t)5U * sizeof (uint64_t));
-  memcpy(y1, y, (uint32_t)5U * sizeof (uint64_t));
-  memset(z1, 0U, (uint32_t)5U * sizeof (uint64_t));
-  z1[0U] = (uint64_t)1U;
+  uint64_t *y1 = out + 5U;
+  uint64_t *z1 = out + 10U;
+  memcpy(x1, x, 5U * sizeof (uint64_t));
+  memcpy(y1, y, 5U * sizeof (uint64_t));
+  memset(z1, 0U, 5U * sizeof (uint64_t));
+  z1[0U] = 1ULL;
 }
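Note: point_load above expects 64 bytes (affine x followed by affine y, 32 bytes each) and yields a 15-limb projective point (x | y | z, five limbs each); point_mul takes a 32-byte big-endian scalar, as the load64_be loop shows. A minimal usage sketch, assuming the matching Hacl_EC_K256.h header and hypothetical variable names:

#include <stdint.h>
#include "Hacl_EC_K256.h"

/* Sketch: multiply the secp256k1 base point by a 32-byte big-endian scalar. */
void k256_scalar_mul_example(uint8_t scalar[32U])
{
  uint64_t g[15U] = { 0U };    /* projective point: x, y, z (5 limbs each) */
  uint64_t q[15U] = { 0U };
  Hacl_EC_K256_mk_base_point(g);
  Hacl_EC_K256_point_mul(scalar, g, q);   /* q = [scalar]g */
}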
 
 /**
diff --git a/src/msvc/Hacl_Ed25519.c b/src/msvc/Hacl_Ed25519.c
index f9881e91..05d96cd0 100644
--- a/src/msvc/Hacl_Ed25519.c
+++ b/src/msvc/Hacl_Ed25519.c
@@ -49,24 +49,24 @@ void Hacl_Bignum25519_reduce_513(uint64_t *a)
   uint64_t f2 = a[2U];
   uint64_t f3 = a[3U];
   uint64_t f4 = a[4U];
-  uint64_t l_ = f0 + (uint64_t)0U;
-  uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = l_ >> (uint32_t)51U;
+  uint64_t l_ = f0 + 0ULL;
+  uint64_t tmp0 = l_ & 0x7ffffffffffffULL;
+  uint64_t c0 = l_ >> 51U;
   uint64_t l_0 = f1 + c0;
-  uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = l_0 >> (uint32_t)51U;
+  uint64_t tmp1 = l_0 & 0x7ffffffffffffULL;
+  uint64_t c1 = l_0 >> 51U;
   uint64_t l_1 = f2 + c1;
-  uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = l_1 >> (uint32_t)51U;
+  uint64_t tmp2 = l_1 & 0x7ffffffffffffULL;
+  uint64_t c2 = l_1 >> 51U;
   uint64_t l_2 = f3 + c2;
-  uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = l_2 >> (uint32_t)51U;
+  uint64_t tmp3 = l_2 & 0x7ffffffffffffULL;
+  uint64_t c3 = l_2 >> 51U;
   uint64_t l_3 = f4 + c3;
-  uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = l_3 >> (uint32_t)51U;
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = l_3 & 0x7ffffffffffffULL;
+  uint64_t c4 = l_3 >> 51U;
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   a[0U] = tmp0_;
   a[1U] = tmp1 + c5;
   a[2U] = tmp2;
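Note: reduce_513 above (and reduce further down) treat a field element as five 51-bit limbs, f = f0 + f1*2^51 + ... + f4*2^204 modulo p = 2^255 - 19. Each step masks a limb to 51 bits (0x7ffffffffffff) and carries the excess into the next limb; the carry out of the top limb re-enters at the bottom multiplied by 19, because 2^255 = 19 mod p. A minimal sketch of one carry step, with a hypothetical helper name:

#include <stdint.h>

/* One 51-bit carry step, as repeated inline in reduce_513 / reduce above. */
static inline uint64_t carry51_sketch(uint64_t *limb, uint64_t carry_in)
{
  uint64_t l = *limb + carry_in;
  *limb = l & 0x7ffffffffffffULL;   /* keep the low 51 bits */
  return l >> 51U;                  /* carry into the next limb */
}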
@@ -77,8 +77,8 @@ void Hacl_Bignum25519_reduce_513(uint64_t *a)
 static inline void fmul0(uint64_t *output, uint64_t *input, uint64_t *input2)
 {
   FStar_UInt128_uint128 tmp[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fmul(output, input, input2, tmp);
 }
 
@@ -89,11 +89,11 @@ static inline void times_2(uint64_t *out, uint64_t *a)
   uint64_t a2 = a[2U];
   uint64_t a3 = a[3U];
   uint64_t a4 = a[4U];
-  uint64_t o0 = (uint64_t)2U * a0;
-  uint64_t o1 = (uint64_t)2U * a1;
-  uint64_t o2 = (uint64_t)2U * a2;
-  uint64_t o3 = (uint64_t)2U * a3;
-  uint64_t o4 = (uint64_t)2U * a4;
+  uint64_t o0 = 2ULL * a0;
+  uint64_t o1 = 2ULL * a1;
+  uint64_t o2 = 2ULL * a2;
+  uint64_t o3 = 2ULL * a3;
+  uint64_t o4 = 2ULL * a4;
   out[0U] = o0;
   out[1U] = o1;
   out[2U] = o2;
@@ -104,54 +104,54 @@ static inline void times_2(uint64_t *out, uint64_t *a)
 static inline void times_d(uint64_t *out, uint64_t *a)
 {
   uint64_t d[5U] = { 0U };
-  d[0U] = (uint64_t)0x00034dca135978a3U;
-  d[1U] = (uint64_t)0x0001a8283b156ebdU;
-  d[2U] = (uint64_t)0x0005e7a26001c029U;
-  d[3U] = (uint64_t)0x000739c663a03cbbU;
-  d[4U] = (uint64_t)0x00052036cee2b6ffU;
+  d[0U] = 0x00034dca135978a3ULL;
+  d[1U] = 0x0001a8283b156ebdULL;
+  d[2U] = 0x0005e7a26001c029ULL;
+  d[3U] = 0x000739c663a03cbbULL;
+  d[4U] = 0x00052036cee2b6ffULL;
   fmul0(out, d, a);
 }
 
 static inline void times_2d(uint64_t *out, uint64_t *a)
 {
   uint64_t d2[5U] = { 0U };
-  d2[0U] = (uint64_t)0x00069b9426b2f159U;
-  d2[1U] = (uint64_t)0x00035050762add7aU;
-  d2[2U] = (uint64_t)0x0003cf44c0038052U;
-  d2[3U] = (uint64_t)0x0006738cc7407977U;
-  d2[4U] = (uint64_t)0x0002406d9dc56dffU;
+  d2[0U] = 0x00069b9426b2f159ULL;
+  d2[1U] = 0x00035050762add7aULL;
+  d2[2U] = 0x0003cf44c0038052ULL;
+  d2[3U] = 0x0006738cc7407977ULL;
+  d2[4U] = 0x0002406d9dc56dffULL;
   fmul0(out, d2, a);
 }
 
 static inline void fsquare(uint64_t *out, uint64_t *a)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Impl_Curve25519_Field51_fsqr(out, a, tmp);
 }
 
 static inline void fsquare_times(uint64_t *output, uint64_t *input, uint32_t count)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_fsquare_times(output, input, tmp, count);
 }
 
 static inline void fsquare_times_inplace(uint64_t *output, uint32_t count)
 {
   FStar_UInt128_uint128 tmp[5U];
-  for (uint32_t _i = 0U; _i < (uint32_t)5U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 5U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_fsquare_times(output, output, tmp, count);
 }
 
 void Hacl_Bignum25519_inverse(uint64_t *out, uint64_t *a)
 {
   FStar_UInt128_uint128 tmp[10U];
-  for (uint32_t _i = 0U; _i < (uint32_t)10U; ++_i)
-    tmp[_i] = FStar_UInt128_uint64_to_uint128((uint64_t)0U);
+  for (uint32_t _i = 0U; _i < 10U; ++_i)
+    tmp[_i] = FStar_UInt128_uint64_to_uint128(0ULL);
   Hacl_Curve25519_51_finv(out, a, tmp);
 }
 
@@ -162,40 +162,40 @@ static inline void reduce(uint64_t *out)
   uint64_t o2 = out[2U];
   uint64_t o3 = out[3U];
   uint64_t o4 = out[4U];
-  uint64_t l_ = o0 + (uint64_t)0U;
-  uint64_t tmp0 = l_ & (uint64_t)0x7ffffffffffffU;
-  uint64_t c0 = l_ >> (uint32_t)51U;
+  uint64_t l_ = o0 + 0ULL;
+  uint64_t tmp0 = l_ & 0x7ffffffffffffULL;
+  uint64_t c0 = l_ >> 51U;
   uint64_t l_0 = o1 + c0;
-  uint64_t tmp1 = l_0 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c1 = l_0 >> (uint32_t)51U;
+  uint64_t tmp1 = l_0 & 0x7ffffffffffffULL;
+  uint64_t c1 = l_0 >> 51U;
   uint64_t l_1 = o2 + c1;
-  uint64_t tmp2 = l_1 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c2 = l_1 >> (uint32_t)51U;
+  uint64_t tmp2 = l_1 & 0x7ffffffffffffULL;
+  uint64_t c2 = l_1 >> 51U;
   uint64_t l_2 = o3 + c2;
-  uint64_t tmp3 = l_2 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c3 = l_2 >> (uint32_t)51U;
+  uint64_t tmp3 = l_2 & 0x7ffffffffffffULL;
+  uint64_t c3 = l_2 >> 51U;
   uint64_t l_3 = o4 + c3;
-  uint64_t tmp4 = l_3 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c4 = l_3 >> (uint32_t)51U;
-  uint64_t l_4 = tmp0 + c4 * (uint64_t)19U;
-  uint64_t tmp0_ = l_4 & (uint64_t)0x7ffffffffffffU;
-  uint64_t c5 = l_4 >> (uint32_t)51U;
+  uint64_t tmp4 = l_3 & 0x7ffffffffffffULL;
+  uint64_t c4 = l_3 >> 51U;
+  uint64_t l_4 = tmp0 + c4 * 19ULL;
+  uint64_t tmp0_ = l_4 & 0x7ffffffffffffULL;
+  uint64_t c5 = l_4 >> 51U;
   uint64_t f0 = tmp0_;
   uint64_t f1 = tmp1 + c5;
   uint64_t f2 = tmp2;
   uint64_t f3 = tmp3;
   uint64_t f4 = tmp4;
-  uint64_t m0 = FStar_UInt64_gte_mask(f0, (uint64_t)0x7ffffffffffedU);
-  uint64_t m1 = FStar_UInt64_eq_mask(f1, (uint64_t)0x7ffffffffffffU);
-  uint64_t m2 = FStar_UInt64_eq_mask(f2, (uint64_t)0x7ffffffffffffU);
-  uint64_t m3 = FStar_UInt64_eq_mask(f3, (uint64_t)0x7ffffffffffffU);
-  uint64_t m4 = FStar_UInt64_eq_mask(f4, (uint64_t)0x7ffffffffffffU);
+  uint64_t m0 = FStar_UInt64_gte_mask(f0, 0x7ffffffffffedULL);
+  uint64_t m1 = FStar_UInt64_eq_mask(f1, 0x7ffffffffffffULL);
+  uint64_t m2 = FStar_UInt64_eq_mask(f2, 0x7ffffffffffffULL);
+  uint64_t m3 = FStar_UInt64_eq_mask(f3, 0x7ffffffffffffULL);
+  uint64_t m4 = FStar_UInt64_eq_mask(f4, 0x7ffffffffffffULL);
   uint64_t mask = (((m0 & m1) & m2) & m3) & m4;
-  uint64_t f0_ = f0 - (mask & (uint64_t)0x7ffffffffffedU);
-  uint64_t f1_ = f1 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f2_ = f2 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f3_ = f3 - (mask & (uint64_t)0x7ffffffffffffU);
-  uint64_t f4_ = f4 - (mask & (uint64_t)0x7ffffffffffffU);
+  uint64_t f0_ = f0 - (mask & 0x7ffffffffffedULL);
+  uint64_t f1_ = f1 - (mask & 0x7ffffffffffffULL);
+  uint64_t f2_ = f2 - (mask & 0x7ffffffffffffULL);
+  uint64_t f3_ = f3 - (mask & 0x7ffffffffffffULL);
+  uint64_t f4_ = f4 - (mask & 0x7ffffffffffffULL);
   uint64_t f01 = f0_;
   uint64_t f11 = f1_;
   uint64_t f21 = f2_;
@@ -212,45 +212,41 @@ void Hacl_Bignum25519_load_51(uint64_t *output, uint8_t *input)
 {
   uint64_t u64s[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = u64s;
-    uint8_t *bj = input + i * (uint32_t)8U;
+    uint8_t *bj = input + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t u64s3 = u64s[3U];
-  u64s[3U] = u64s3 & (uint64_t)0x7fffffffffffffffU;
-  output[0U] = u64s[0U] & (uint64_t)0x7ffffffffffffU;
-  output[1U] = u64s[0U] >> (uint32_t)51U | (u64s[1U] & (uint64_t)0x3fffffffffU) << (uint32_t)13U;
-  output[2U] = u64s[1U] >> (uint32_t)38U | (u64s[2U] & (uint64_t)0x1ffffffU) << (uint32_t)26U;
-  output[3U] = u64s[2U] >> (uint32_t)25U | (u64s[3U] & (uint64_t)0xfffU) << (uint32_t)39U;
-  output[4U] = u64s[3U] >> (uint32_t)12U;
+  u64s[3U] = u64s3 & 0x7fffffffffffffffULL;
+  output[0U] = u64s[0U] & 0x7ffffffffffffULL;
+  output[1U] = u64s[0U] >> 51U | (u64s[1U] & 0x3fffffffffULL) << 13U;
+  output[2U] = u64s[1U] >> 38U | (u64s[2U] & 0x1ffffffULL) << 26U;
+  output[3U] = u64s[2U] >> 25U | (u64s[3U] & 0xfffULL) << 39U;
+  output[4U] = u64s[3U] >> 12U;
 }
 
 void Hacl_Bignum25519_store_51(uint8_t *output, uint64_t *input)
 {
   uint64_t u64s[4U] = { 0U };
   Hacl_Impl_Curve25519_Field51_store_felem(u64s, input);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(output + i * (uint32_t)8U, u64s[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(output + i * 8U, u64s[i]););
 }
 
 void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
 {
   uint64_t tmp[20U] = { 0U };
   uint64_t *tmp1 = tmp;
-  uint64_t *tmp20 = tmp + (uint32_t)5U;
-  uint64_t *tmp30 = tmp + (uint32_t)10U;
-  uint64_t *tmp40 = tmp + (uint32_t)15U;
+  uint64_t *tmp20 = tmp + 5U;
+  uint64_t *tmp30 = tmp + 10U;
+  uint64_t *tmp40 = tmp + 15U;
   uint64_t *x10 = p;
-  uint64_t *y10 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y10 = p + 5U;
+  uint64_t *z1 = p + 10U;
   fsquare(tmp1, x10);
   fsquare(tmp20, y10);
   fsum(tmp30, tmp1, tmp20);
@@ -258,11 +254,11 @@ void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
   fsquare(tmp1, z1);
   times_2(tmp1, tmp1);
   uint64_t *tmp10 = tmp;
-  uint64_t *tmp2 = tmp + (uint32_t)5U;
-  uint64_t *tmp3 = tmp + (uint32_t)10U;
-  uint64_t *tmp4 = tmp + (uint32_t)15U;
+  uint64_t *tmp2 = tmp + 5U;
+  uint64_t *tmp3 = tmp + 10U;
+  uint64_t *tmp4 = tmp + 15U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
+  uint64_t *y1 = p + 5U;
   fsum(tmp2, x1, y1);
   fsquare(tmp2, tmp2);
   Hacl_Bignum25519_reduce_513(tmp3);
@@ -271,13 +267,13 @@ void Hacl_Impl_Ed25519_PointDouble_point_double(uint64_t *out, uint64_t *p)
   Hacl_Bignum25519_reduce_513(tmp4);
   fsum(tmp10, tmp10, tmp4);
   uint64_t *tmp_f = tmp;
-  uint64_t *tmp_e = tmp + (uint32_t)5U;
-  uint64_t *tmp_h = tmp + (uint32_t)10U;
-  uint64_t *tmp_g = tmp + (uint32_t)15U;
+  uint64_t *tmp_e = tmp + 5U;
+  uint64_t *tmp_h = tmp + 10U;
+  uint64_t *tmp_g = tmp + 15U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
-  uint64_t *t3 = out + (uint32_t)15U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
+  uint64_t *t3 = out + 15U;
   fmul0(x3, tmp_e, tmp_f);
   fmul0(y3, tmp_g, tmp_h);
   fmul0(t3, tmp_e, tmp_h);
@@ -288,13 +284,13 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
 {
   uint64_t tmp[30U] = { 0U };
   uint64_t *tmp1 = tmp;
-  uint64_t *tmp20 = tmp + (uint32_t)5U;
-  uint64_t *tmp30 = tmp + (uint32_t)10U;
-  uint64_t *tmp40 = tmp + (uint32_t)15U;
+  uint64_t *tmp20 = tmp + 5U;
+  uint64_t *tmp30 = tmp + 10U;
+  uint64_t *tmp40 = tmp + 15U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
+  uint64_t *y1 = p + 5U;
   uint64_t *x2 = q;
-  uint64_t *y2 = q + (uint32_t)5U;
+  uint64_t *y2 = q + 5U;
   fdifference(tmp1, y1, x1);
   fdifference(tmp20, y2, x2);
   fmul0(tmp30, tmp1, tmp20);
@@ -302,15 +298,15 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
   fsum(tmp20, y2, x2);
   fmul0(tmp40, tmp1, tmp20);
   uint64_t *tmp10 = tmp;
-  uint64_t *tmp2 = tmp + (uint32_t)5U;
-  uint64_t *tmp3 = tmp + (uint32_t)10U;
-  uint64_t *tmp4 = tmp + (uint32_t)15U;
-  uint64_t *tmp5 = tmp + (uint32_t)20U;
-  uint64_t *tmp6 = tmp + (uint32_t)25U;
-  uint64_t *z1 = p + (uint32_t)10U;
-  uint64_t *t1 = p + (uint32_t)15U;
-  uint64_t *z2 = q + (uint32_t)10U;
-  uint64_t *t2 = q + (uint32_t)15U;
+  uint64_t *tmp2 = tmp + 5U;
+  uint64_t *tmp3 = tmp + 10U;
+  uint64_t *tmp4 = tmp + 15U;
+  uint64_t *tmp5 = tmp + 20U;
+  uint64_t *tmp6 = tmp + 25U;
+  uint64_t *z1 = p + 10U;
+  uint64_t *t1 = p + 15U;
+  uint64_t *z2 = q + 10U;
+  uint64_t *t2 = q + 15U;
   times_2d(tmp10, t1);
   fmul0(tmp10, tmp10, t2);
   times_2(tmp2, z1);
@@ -320,13 +316,13 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
   fsum(tmp10, tmp2, tmp10);
   fsum(tmp2, tmp4, tmp3);
   uint64_t *tmp_g = tmp;
-  uint64_t *tmp_h = tmp + (uint32_t)5U;
-  uint64_t *tmp_e = tmp + (uint32_t)20U;
-  uint64_t *tmp_f = tmp + (uint32_t)25U;
+  uint64_t *tmp_h = tmp + 5U;
+  uint64_t *tmp_e = tmp + 20U;
+  uint64_t *tmp_f = tmp + 25U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
-  uint64_t *t3 = out + (uint32_t)15U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
+  uint64_t *t3 = out + 15U;
   fmul0(x3, tmp_e, tmp_f);
   fmul0(y3, tmp_g, tmp_h);
   fmul0(t3, tmp_e, tmp_h);
@@ -336,64 +332,64 @@ void Hacl_Impl_Ed25519_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *
 void Hacl_Impl_Ed25519_PointConstants_make_point_inf(uint64_t *b)
 {
   uint64_t *x = b;
-  uint64_t *y = b + (uint32_t)5U;
-  uint64_t *z = b + (uint32_t)10U;
-  uint64_t *t = b + (uint32_t)15U;
-  x[0U] = (uint64_t)0U;
-  x[1U] = (uint64_t)0U;
-  x[2U] = (uint64_t)0U;
-  x[3U] = (uint64_t)0U;
-  x[4U] = (uint64_t)0U;
-  y[0U] = (uint64_t)1U;
-  y[1U] = (uint64_t)0U;
-  y[2U] = (uint64_t)0U;
-  y[3U] = (uint64_t)0U;
-  y[4U] = (uint64_t)0U;
-  z[0U] = (uint64_t)1U;
-  z[1U] = (uint64_t)0U;
-  z[2U] = (uint64_t)0U;
-  z[3U] = (uint64_t)0U;
-  z[4U] = (uint64_t)0U;
-  t[0U] = (uint64_t)0U;
-  t[1U] = (uint64_t)0U;
-  t[2U] = (uint64_t)0U;
-  t[3U] = (uint64_t)0U;
-  t[4U] = (uint64_t)0U;
+  uint64_t *y = b + 5U;
+  uint64_t *z = b + 10U;
+  uint64_t *t = b + 15U;
+  x[0U] = 0ULL;
+  x[1U] = 0ULL;
+  x[2U] = 0ULL;
+  x[3U] = 0ULL;
+  x[4U] = 0ULL;
+  y[0U] = 1ULL;
+  y[1U] = 0ULL;
+  y[2U] = 0ULL;
+  y[3U] = 0ULL;
+  y[4U] = 0ULL;
+  z[0U] = 1ULL;
+  z[1U] = 0ULL;
+  z[2U] = 0ULL;
+  z[3U] = 0ULL;
+  z[4U] = 0ULL;
+  t[0U] = 0ULL;
+  t[1U] = 0ULL;
+  t[2U] = 0ULL;
+  t[3U] = 0ULL;
+  t[4U] = 0ULL;
 }
 
 static inline void pow2_252m2(uint64_t *out, uint64_t *z)
 {
   uint64_t buf[20U] = { 0U };
   uint64_t *a = buf;
-  uint64_t *t00 = buf + (uint32_t)5U;
-  uint64_t *b0 = buf + (uint32_t)10U;
-  uint64_t *c0 = buf + (uint32_t)15U;
-  fsquare_times(a, z, (uint32_t)1U);
-  fsquare_times(t00, a, (uint32_t)2U);
+  uint64_t *t00 = buf + 5U;
+  uint64_t *b0 = buf + 10U;
+  uint64_t *c0 = buf + 15U;
+  fsquare_times(a, z, 1U);
+  fsquare_times(t00, a, 2U);
   fmul0(b0, t00, z);
   fmul0(a, b0, a);
-  fsquare_times(t00, a, (uint32_t)1U);
+  fsquare_times(t00, a, 1U);
   fmul0(b0, t00, b0);
-  fsquare_times(t00, b0, (uint32_t)5U);
+  fsquare_times(t00, b0, 5U);
   fmul0(b0, t00, b0);
-  fsquare_times(t00, b0, (uint32_t)10U);
+  fsquare_times(t00, b0, 10U);
   fmul0(c0, t00, b0);
-  fsquare_times(t00, c0, (uint32_t)20U);
+  fsquare_times(t00, c0, 20U);
   fmul0(t00, t00, c0);
-  fsquare_times_inplace(t00, (uint32_t)10U);
+  fsquare_times_inplace(t00, 10U);
   fmul0(b0, t00, b0);
-  fsquare_times(t00, b0, (uint32_t)50U);
+  fsquare_times(t00, b0, 50U);
   uint64_t *a0 = buf;
-  uint64_t *t0 = buf + (uint32_t)5U;
-  uint64_t *b = buf + (uint32_t)10U;
-  uint64_t *c = buf + (uint32_t)15U;
-  fsquare_times(a0, z, (uint32_t)1U);
+  uint64_t *t0 = buf + 5U;
+  uint64_t *b = buf + 10U;
+  uint64_t *c = buf + 15U;
+  fsquare_times(a0, z, 1U);
   fmul0(c, t0, b);
-  fsquare_times(t0, c, (uint32_t)100U);
+  fsquare_times(t0, c, 100U);
   fmul0(t0, t0, c);
-  fsquare_times_inplace(t0, (uint32_t)50U);
+  fsquare_times_inplace(t0, 50U);
   fmul0(t0, t0, b);
-  fsquare_times_inplace(t0, (uint32_t)2U);
+  fsquare_times_inplace(t0, 2U);
   fmul0(out, t0, a0);
 }
 
@@ -404,23 +400,17 @@ static inline bool is_0(uint64_t *x)
   uint64_t x2 = x[2U];
   uint64_t x3 = x[3U];
   uint64_t x4 = x[4U];
-  return
-    x0
-    == (uint64_t)0U
-    && x1 == (uint64_t)0U
-    && x2 == (uint64_t)0U
-    && x3 == (uint64_t)0U
-    && x4 == (uint64_t)0U;
+  return x0 == 0ULL && x1 == 0ULL && x2 == 0ULL && x3 == 0ULL && x4 == 0ULL;
 }
 
 static inline void mul_modp_sqrt_m1(uint64_t *x)
 {
   uint64_t sqrt_m1[5U] = { 0U };
-  sqrt_m1[0U] = (uint64_t)0x00061b274a0ea0b0U;
-  sqrt_m1[1U] = (uint64_t)0x0000d5a5fc8f189dU;
-  sqrt_m1[2U] = (uint64_t)0x0007ef5e9cbd0c60U;
-  sqrt_m1[3U] = (uint64_t)0x00078595a6804c9eU;
-  sqrt_m1[4U] = (uint64_t)0x0002b8324804fc1dU;
+  sqrt_m1[0U] = 0x00061b274a0ea0b0ULL;
+  sqrt_m1[1U] = 0x0000d5a5fc8f189dULL;
+  sqrt_m1[2U] = 0x0007ef5e9cbd0c60ULL;
+  sqrt_m1[3U] = 0x00078595a6804c9eULL;
+  sqrt_m1[4U] = 0x0002b8324804fc1dULL;
   fmul0(x, x, sqrt_m1);
 }
 
@@ -436,11 +426,11 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
   bool
   b =
     x00
-    >= (uint64_t)0x7ffffffffffedU
-    && x1 == (uint64_t)0x7ffffffffffffU
-    && x21 == (uint64_t)0x7ffffffffffffU
-    && x30 == (uint64_t)0x7ffffffffffffU
-    && x4 == (uint64_t)0x7ffffffffffffU;
+    >= 0x7ffffffffffedULL
+    && x1 == 0x7ffffffffffffULL
+    && x21 == 0x7ffffffffffffULL
+    && x30 == 0x7ffffffffffffULL
+    && x4 == 0x7ffffffffffffULL;
   bool res;
   if (b)
   {
@@ -450,14 +440,14 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
   {
     uint64_t tmp1[20U] = { 0U };
     uint64_t *one = tmp1;
-    uint64_t *y2 = tmp1 + (uint32_t)5U;
-    uint64_t *dyyi = tmp1 + (uint32_t)10U;
-    uint64_t *dyy = tmp1 + (uint32_t)15U;
-    one[0U] = (uint64_t)1U;
-    one[1U] = (uint64_t)0U;
-    one[2U] = (uint64_t)0U;
-    one[3U] = (uint64_t)0U;
-    one[4U] = (uint64_t)0U;
+    uint64_t *y2 = tmp1 + 5U;
+    uint64_t *dyyi = tmp1 + 10U;
+    uint64_t *dyy = tmp1 + 15U;
+    one[0U] = 1ULL;
+    one[1U] = 0ULL;
+    one[2U] = 0ULL;
+    one[3U] = 0ULL;
+    one[4U] = 0ULL;
     fsquare(y2, y);
     times_d(dyy, y2);
     fsum(dyy, dyy, one);
@@ -470,37 +460,37 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
     uint8_t z;
     if (x2_is_0)
     {
-      if (sign == (uint64_t)0U)
+      if (sign == 0ULL)
       {
-        x[0U] = (uint64_t)0U;
-        x[1U] = (uint64_t)0U;
-        x[2U] = (uint64_t)0U;
-        x[3U] = (uint64_t)0U;
-        x[4U] = (uint64_t)0U;
-        z = (uint8_t)1U;
+        x[0U] = 0ULL;
+        x[1U] = 0ULL;
+        x[2U] = 0ULL;
+        x[3U] = 0ULL;
+        x[4U] = 0ULL;
+        z = 1U;
       }
       else
       {
-        z = (uint8_t)0U;
+        z = 0U;
       }
     }
     else
     {
-      z = (uint8_t)2U;
+      z = 2U;
     }
-    if (z == (uint8_t)0U)
+    if (z == 0U)
     {
       res = false;
     }
-    else if (z == (uint8_t)1U)
+    else if (z == 1U)
     {
       res = true;
     }
     else
     {
       uint64_t *x210 = tmp;
-      uint64_t *x31 = tmp + (uint32_t)5U;
-      uint64_t *t00 = tmp + (uint32_t)10U;
+      uint64_t *x31 = tmp + 5U;
+      uint64_t *t00 = tmp + 10U;
       pow2_252m2(x31, x210);
       fsquare(t00, x31);
       fdifference(t00, t00, x210);
@@ -512,8 +502,8 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
         mul_modp_sqrt_m1(x31);
       }
       uint64_t *x211 = tmp;
-      uint64_t *x3 = tmp + (uint32_t)5U;
-      uint64_t *t01 = tmp + (uint32_t)10U;
+      uint64_t *x3 = tmp + 5U;
+      uint64_t *t01 = tmp + 10U;
       fsquare(t01, x3);
       fdifference(t01, t01, x211);
       Hacl_Bignum25519_reduce_513(t01);
@@ -525,23 +515,23 @@ static inline bool recover_x(uint64_t *x, uint64_t *y, uint64_t sign)
       }
       else
       {
-        uint64_t *x32 = tmp + (uint32_t)5U;
-        uint64_t *t0 = tmp + (uint32_t)10U;
+        uint64_t *x32 = tmp + 5U;
+        uint64_t *t0 = tmp + 10U;
         reduce(x32);
         uint64_t x0 = x32[0U];
-        uint64_t x01 = x0 & (uint64_t)1U;
+        uint64_t x01 = x0 & 1ULL;
         if (!(x01 == sign))
         {
-          t0[0U] = (uint64_t)0U;
-          t0[1U] = (uint64_t)0U;
-          t0[2U] = (uint64_t)0U;
-          t0[3U] = (uint64_t)0U;
-          t0[4U] = (uint64_t)0U;
+          t0[0U] = 0ULL;
+          t0[1U] = 0ULL;
+          t0[2U] = 0ULL;
+          t0[3U] = 0ULL;
+          t0[4U] = 0ULL;
           fdifference(x32, t0, x32);
           Hacl_Bignum25519_reduce_513(x32);
           reduce(x32);
         }
-        memcpy(x, x32, (uint32_t)5U * sizeof (uint64_t));
+        memcpy(x, x32, 5U * sizeof (uint64_t));
         res = true;
       }
     }
@@ -554,9 +544,9 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *
 {
   uint64_t tmp[10U] = { 0U };
   uint64_t *y = tmp;
-  uint64_t *x = tmp + (uint32_t)5U;
+  uint64_t *x = tmp + 5U;
   uint8_t s31 = s[31U];
-  uint8_t z = s31 >> (uint32_t)7U;
+  uint8_t z = (uint32_t)s31 >> 7U;
   uint64_t sign = (uint64_t)z;
   Hacl_Bignum25519_load_51(y, s);
   bool z0 = recover_x(x, y, sign);
@@ -568,16 +558,16 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *
   else
   {
     uint64_t *outx = out;
-    uint64_t *outy = out + (uint32_t)5U;
-    uint64_t *outz = out + (uint32_t)10U;
-    uint64_t *outt = out + (uint32_t)15U;
-    memcpy(outx, x, (uint32_t)5U * sizeof (uint64_t));
-    memcpy(outy, y, (uint32_t)5U * sizeof (uint64_t));
-    outz[0U] = (uint64_t)1U;
-    outz[1U] = (uint64_t)0U;
-    outz[2U] = (uint64_t)0U;
-    outz[3U] = (uint64_t)0U;
-    outz[4U] = (uint64_t)0U;
+    uint64_t *outy = out + 5U;
+    uint64_t *outz = out + 10U;
+    uint64_t *outt = out + 15U;
+    memcpy(outx, x, 5U * sizeof (uint64_t));
+    memcpy(outy, y, 5U * sizeof (uint64_t));
+    outz[0U] = 1ULL;
+    outz[1U] = 0ULL;
+    outz[2U] = 0ULL;
+    outz[3U] = 0ULL;
+    outz[4U] = 0ULL;
     fmul0(outt, x, y);
     res = true;
   }
@@ -588,25 +578,25 @@ bool Hacl_Impl_Ed25519_PointDecompress_point_decompress(uint64_t *out, uint8_t *
 void Hacl_Impl_Ed25519_PointCompress_point_compress(uint8_t *z, uint64_t *p)
 {
   uint64_t tmp[15U] = { 0U };
-  uint64_t *x = tmp + (uint32_t)5U;
-  uint64_t *out = tmp + (uint32_t)10U;
+  uint64_t *x = tmp + 5U;
+  uint64_t *out = tmp + 10U;
   uint64_t *zinv1 = tmp;
-  uint64_t *x1 = tmp + (uint32_t)5U;
-  uint64_t *out1 = tmp + (uint32_t)10U;
+  uint64_t *x1 = tmp + 5U;
+  uint64_t *out1 = tmp + 10U;
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
   Hacl_Bignum25519_inverse(zinv1, pz);
   fmul0(x1, px, zinv1);
   reduce(x1);
   fmul0(out1, py, zinv1);
   Hacl_Bignum25519_reduce_513(out1);
   uint64_t x0 = x[0U];
-  uint64_t b = x0 & (uint64_t)1U;
+  uint64_t b = x0 & 1ULL;
   Hacl_Bignum25519_store_51(z, out);
   uint8_t xbyte = (uint8_t)b;
   uint8_t o31 = z[31U];
-  z[31U] = o31 + (xbyte << (uint32_t)7U);
+  z[31U] = (uint32_t)o31 + ((uint32_t)xbyte << 7U);
 }
 
 static inline void barrett_reduction(uint64_t *z, uint64_t *t)
@@ -621,40 +611,40 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   uint64_t t7 = t[7U];
   uint64_t t8 = t[8U];
   uint64_t t9 = t[9U];
-  uint64_t m00 = (uint64_t)0x12631a5cf5d3edU;
-  uint64_t m10 = (uint64_t)0xf9dea2f79cd658U;
-  uint64_t m20 = (uint64_t)0x000000000014deU;
-  uint64_t m30 = (uint64_t)0x00000000000000U;
-  uint64_t m40 = (uint64_t)0x00000010000000U;
+  uint64_t m00 = 0x12631a5cf5d3edULL;
+  uint64_t m10 = 0xf9dea2f79cd658ULL;
+  uint64_t m20 = 0x000000000014deULL;
+  uint64_t m30 = 0x00000000000000ULL;
+  uint64_t m40 = 0x00000010000000ULL;
   uint64_t m0 = m00;
   uint64_t m1 = m10;
   uint64_t m2 = m20;
   uint64_t m3 = m30;
   uint64_t m4 = m40;
-  uint64_t m010 = (uint64_t)0x9ce5a30a2c131bU;
-  uint64_t m110 = (uint64_t)0x215d086329a7edU;
-  uint64_t m210 = (uint64_t)0xffffffffeb2106U;
-  uint64_t m310 = (uint64_t)0xffffffffffffffU;
-  uint64_t m410 = (uint64_t)0x00000fffffffffU;
+  uint64_t m010 = 0x9ce5a30a2c131bULL;
+  uint64_t m110 = 0x215d086329a7edULL;
+  uint64_t m210 = 0xffffffffeb2106ULL;
+  uint64_t m310 = 0xffffffffffffffULL;
+  uint64_t m410 = 0x00000fffffffffULL;
   uint64_t mu0 = m010;
   uint64_t mu1 = m110;
   uint64_t mu2 = m210;
   uint64_t mu3 = m310;
   uint64_t mu4 = m410;
-  uint64_t y_ = (t5 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_ = t4 >> (uint32_t)24U;
+  uint64_t y_ = (t5 & 0xffffffULL) << 32U;
+  uint64_t x_ = t4 >> 24U;
   uint64_t z00 = x_ | y_;
-  uint64_t y_0 = (t6 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_0 = t5 >> (uint32_t)24U;
+  uint64_t y_0 = (t6 & 0xffffffULL) << 32U;
+  uint64_t x_0 = t5 >> 24U;
   uint64_t z10 = x_0 | y_0;
-  uint64_t y_1 = (t7 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_1 = t6 >> (uint32_t)24U;
+  uint64_t y_1 = (t7 & 0xffffffULL) << 32U;
+  uint64_t x_1 = t6 >> 24U;
   uint64_t z20 = x_1 | y_1;
-  uint64_t y_2 = (t8 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_2 = t7 >> (uint32_t)24U;
+  uint64_t y_2 = (t8 & 0xffffffULL) << 32U;
+  uint64_t x_2 = t7 >> 24U;
   uint64_t z30 = x_2 | y_2;
-  uint64_t y_3 = (t9 & (uint64_t)0xffffffU) << (uint32_t)32U;
-  uint64_t x_3 = t8 >> (uint32_t)24U;
+  uint64_t y_3 = (t9 & 0xffffffULL) << 32U;
+  uint64_t x_3 = t8 >> 24U;
   uint64_t z40 = x_3 | y_3;
   uint64_t q0 = z00;
   uint64_t q1 = z10;
@@ -707,55 +697,37 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   FStar_UInt128_uint128 z6 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy24, xy33), xy42);
   FStar_UInt128_uint128 z7 = FStar_UInt128_add_mod(xy34, xy43);
   FStar_UInt128_uint128 z8 = xy44;
-  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z01, (uint32_t)56U);
+  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z01, 56U);
   FStar_UInt128_uint128 c00 = carry0;
-  FStar_UInt128_uint128
-  carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z11, c00), (uint32_t)56U);
+  FStar_UInt128_uint128 carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z11, c00), 56U);
   FStar_UInt128_uint128 c10 = carry1;
-  FStar_UInt128_uint128
-  carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z21, c10), (uint32_t)56U);
+  FStar_UInt128_uint128 carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z21, c10), 56U);
   FStar_UInt128_uint128 c20 = carry2;
-  FStar_UInt128_uint128
-  carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z31, c20), (uint32_t)56U);
+  FStar_UInt128_uint128 carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z31, c20), 56U);
   FStar_UInt128_uint128 c30 = carry3;
-  FStar_UInt128_uint128
-  carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z41, c30), (uint32_t)56U);
+  FStar_UInt128_uint128 carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z41, c30), 56U);
   uint64_t
-  t100 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z41, c30))
-    & (uint64_t)0xffffffffffffffU;
+  t100 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z41, c30)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c40 = carry4;
   uint64_t t410 = t100;
-  FStar_UInt128_uint128
-  carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z5, c40), (uint32_t)56U);
+  FStar_UInt128_uint128 carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z5, c40), 56U);
   uint64_t
-  t101 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z5, c40))
-    & (uint64_t)0xffffffffffffffU;
+  t101 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z5, c40)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c5 = carry5;
   uint64_t t51 = t101;
-  FStar_UInt128_uint128
-  carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z6, c5), (uint32_t)56U);
+  FStar_UInt128_uint128 carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z6, c5), 56U);
   uint64_t
-  t102 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z6, c5))
-    & (uint64_t)0xffffffffffffffU;
+  t102 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z6, c5)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c6 = carry6;
   uint64_t t61 = t102;
-  FStar_UInt128_uint128
-  carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z7, c6), (uint32_t)56U);
+  FStar_UInt128_uint128 carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z7, c6), 56U);
   uint64_t
-  t103 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z7, c6))
-    & (uint64_t)0xffffffffffffffU;
+  t103 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z7, c6)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c7 = carry7;
   uint64_t t71 = t103;
-  FStar_UInt128_uint128
-  carry8 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z8, c7), (uint32_t)56U);
+  FStar_UInt128_uint128 carry8 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z8, c7), 56U);
   uint64_t
-  t104 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z8, c7))
-    & (uint64_t)0xffffffffffffffU;
+  t104 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z8, c7)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c8 = carry8;
   uint64_t t81 = t104;
   uint64_t t91 = FStar_UInt128_uint128_to_uint64(c8);
@@ -765,20 +737,20 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   uint64_t qmu7_ = t71;
   uint64_t qmu8_ = t81;
   uint64_t qmu9_ = t91;
-  uint64_t y_4 = (qmu5_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_4 = qmu4_ >> (uint32_t)40U;
+  uint64_t y_4 = (qmu5_ & 0xffffffffffULL) << 16U;
+  uint64_t x_4 = qmu4_ >> 40U;
   uint64_t z02 = x_4 | y_4;
-  uint64_t y_5 = (qmu6_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_5 = qmu5_ >> (uint32_t)40U;
+  uint64_t y_5 = (qmu6_ & 0xffffffffffULL) << 16U;
+  uint64_t x_5 = qmu5_ >> 40U;
   uint64_t z12 = x_5 | y_5;
-  uint64_t y_6 = (qmu7_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_6 = qmu6_ >> (uint32_t)40U;
+  uint64_t y_6 = (qmu7_ & 0xffffffffffULL) << 16U;
+  uint64_t x_6 = qmu6_ >> 40U;
   uint64_t z22 = x_6 | y_6;
-  uint64_t y_7 = (qmu8_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_7 = qmu7_ >> (uint32_t)40U;
+  uint64_t y_7 = (qmu8_ & 0xffffffffffULL) << 16U;
+  uint64_t x_7 = qmu7_ >> 40U;
   uint64_t z32 = x_7 | y_7;
-  uint64_t y_8 = (qmu9_ & (uint64_t)0xffffffffffU) << (uint32_t)16U;
-  uint64_t x_8 = qmu8_ >> (uint32_t)40U;
+  uint64_t y_8 = (qmu9_ & 0xffffffffffULL) << 16U;
+  uint64_t x_8 = qmu8_ >> 40U;
   uint64_t z42 = x_8 | y_8;
   uint64_t qdiv0 = z02;
   uint64_t qdiv1 = z12;
@@ -789,7 +761,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   uint64_t r1 = t1;
   uint64_t r2 = t2;
   uint64_t r3 = t3;
-  uint64_t r4 = t4 & (uint64_t)0xffffffffffU;
+  uint64_t r4 = t4 & 0xffffffffffULL;
   FStar_UInt128_uint128 xy00 = FStar_UInt128_mul_wide(qdiv0, m0);
   FStar_UInt128_uint128 xy01 = FStar_UInt128_mul_wide(qdiv0, m1);
   FStar_UInt128_uint128 xy02 = FStar_UInt128_mul_wide(qdiv0, m2);
@@ -805,18 +777,18 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
   FStar_UInt128_uint128 xy30 = FStar_UInt128_mul_wide(qdiv3, m0);
   FStar_UInt128_uint128 xy31 = FStar_UInt128_mul_wide(qdiv3, m1);
   FStar_UInt128_uint128 xy40 = FStar_UInt128_mul_wide(qdiv4, m0);
-  FStar_UInt128_uint128 carry9 = FStar_UInt128_shift_right(xy00, (uint32_t)56U);
-  uint64_t t105 = FStar_UInt128_uint128_to_uint64(xy00) & (uint64_t)0xffffffffffffffU;
+  FStar_UInt128_uint128 carry9 = FStar_UInt128_shift_right(xy00, 56U);
+  uint64_t t105 = FStar_UInt128_uint128_to_uint64(xy00) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c0 = carry9;
   uint64_t t010 = t105;
   FStar_UInt128_uint128
   carry10 =
     FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0),
-      (uint32_t)56U);
+      56U);
   uint64_t
   t106 =
     FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0))
-    & (uint64_t)0xffffffffffffffU;
+    & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c11 = carry10;
   uint64_t t110 = t106;
   FStar_UInt128_uint128
@@ -825,14 +797,14 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy11),
           xy20),
         c11),
-      (uint32_t)56U);
+      56U);
   uint64_t
   t107 =
     FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02,
             xy11),
           xy20),
         c11))
-    & (uint64_t)0xffffffffffffffU;
+    & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c21 = carry11;
   uint64_t t210 = t107;
   FStar_UInt128_uint128
@@ -842,7 +814,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy21),
           xy30),
         c21),
-      (uint32_t)56U);
+      56U);
   uint64_t
   t108 =
     FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03,
@@ -850,7 +822,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy21),
           xy30),
         c21))
-    & (uint64_t)0xffffffffffffffU;
+    & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c31 = carry;
   uint64_t t310 = t108;
   uint64_t
@@ -861,67 +833,67 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t)
             xy31),
           xy40),
         c31))
-    & (uint64_t)0xffffffffffU;
+    & 0xffffffffffULL;
   uint64_t qmul0 = t010;
   uint64_t qmul1 = t110;
   uint64_t qmul2 = t210;
   uint64_t qmul3 = t310;
   uint64_t qmul4 = t411;
-  uint64_t b5 = (r0 - qmul0) >> (uint32_t)63U;
-  uint64_t t109 = (b5 << (uint32_t)56U) + r0 - qmul0;
+  uint64_t b5 = (r0 - qmul0) >> 63U;
+  uint64_t t109 = (b5 << 56U) + r0 - qmul0;
   uint64_t c1 = b5;
   uint64_t t011 = t109;
-  uint64_t b6 = (r1 - (qmul1 + c1)) >> (uint32_t)63U;
-  uint64_t t1010 = (b6 << (uint32_t)56U) + r1 - (qmul1 + c1);
+  uint64_t b6 = (r1 - (qmul1 + c1)) >> 63U;
+  uint64_t t1010 = (b6 << 56U) + r1 - (qmul1 + c1);
   uint64_t c2 = b6;
   uint64_t t111 = t1010;
-  uint64_t b7 = (r2 - (qmul2 + c2)) >> (uint32_t)63U;
-  uint64_t t1011 = (b7 << (uint32_t)56U) + r2 - (qmul2 + c2);
+  uint64_t b7 = (r2 - (qmul2 + c2)) >> 63U;
+  uint64_t t1011 = (b7 << 56U) + r2 - (qmul2 + c2);
   uint64_t c3 = b7;
   uint64_t t211 = t1011;
-  uint64_t b8 = (r3 - (qmul3 + c3)) >> (uint32_t)63U;
-  uint64_t t1012 = (b8 << (uint32_t)56U) + r3 - (qmul3 + c3);
+  uint64_t b8 = (r3 - (qmul3 + c3)) >> 63U;
+  uint64_t t1012 = (b8 << 56U) + r3 - (qmul3 + c3);
   uint64_t c4 = b8;
   uint64_t t311 = t1012;
-  uint64_t b9 = (r4 - (qmul4 + c4)) >> (uint32_t)63U;
-  uint64_t t1013 = (b9 << (uint32_t)40U) + r4 - (qmul4 + c4);
+  uint64_t b9 = (r4 - (qmul4 + c4)) >> 63U;
+  uint64_t t1013 = (b9 << 40U) + r4 - (qmul4 + c4);
   uint64_t t412 = t1013;
   uint64_t s0 = t011;
   uint64_t s1 = t111;
   uint64_t s2 = t211;
   uint64_t s3 = t311;
   uint64_t s4 = t412;
-  uint64_t m01 = (uint64_t)0x12631a5cf5d3edU;
-  uint64_t m11 = (uint64_t)0xf9dea2f79cd658U;
-  uint64_t m21 = (uint64_t)0x000000000014deU;
-  uint64_t m31 = (uint64_t)0x00000000000000U;
-  uint64_t m41 = (uint64_t)0x00000010000000U;
+  uint64_t m01 = 0x12631a5cf5d3edULL;
+  uint64_t m11 = 0xf9dea2f79cd658ULL;
+  uint64_t m21 = 0x000000000014deULL;
+  uint64_t m31 = 0x00000000000000ULL;
+  uint64_t m41 = 0x00000010000000ULL;
   uint64_t y0 = m01;
   uint64_t y1 = m11;
   uint64_t y2 = m21;
   uint64_t y3 = m31;
   uint64_t y4 = m41;
-  uint64_t b10 = (s0 - y0) >> (uint32_t)63U;
-  uint64_t t1014 = (b10 << (uint32_t)56U) + s0 - y0;
+  uint64_t b10 = (s0 - y0) >> 63U;
+  uint64_t t1014 = (b10 << 56U) + s0 - y0;
   uint64_t b0 = b10;
   uint64_t t01 = t1014;
-  uint64_t b11 = (s1 - (y1 + b0)) >> (uint32_t)63U;
-  uint64_t t1015 = (b11 << (uint32_t)56U) + s1 - (y1 + b0);
+  uint64_t b11 = (s1 - (y1 + b0)) >> 63U;
+  uint64_t t1015 = (b11 << 56U) + s1 - (y1 + b0);
   uint64_t b1 = b11;
   uint64_t t11 = t1015;
-  uint64_t b12 = (s2 - (y2 + b1)) >> (uint32_t)63U;
-  uint64_t t1016 = (b12 << (uint32_t)56U) + s2 - (y2 + b1);
+  uint64_t b12 = (s2 - (y2 + b1)) >> 63U;
+  uint64_t t1016 = (b12 << 56U) + s2 - (y2 + b1);
   uint64_t b2 = b12;
   uint64_t t21 = t1016;
-  uint64_t b13 = (s3 - (y3 + b2)) >> (uint32_t)63U;
-  uint64_t t1017 = (b13 << (uint32_t)56U) + s3 - (y3 + b2);
+  uint64_t b13 = (s3 - (y3 + b2)) >> 63U;
+  uint64_t t1017 = (b13 << 56U) + s3 - (y3 + b2);
   uint64_t b3 = b13;
   uint64_t t31 = t1017;
-  uint64_t b = (s4 - (y4 + b3)) >> (uint32_t)63U;
-  uint64_t t10 = (b << (uint32_t)56U) + s4 - (y4 + b3);
+  uint64_t b = (s4 - (y4 + b3)) >> 63U;
+  uint64_t t10 = (b << 56U) + s4 - (y4 + b3);
   uint64_t b4 = b;
   uint64_t t41 = t10;
-  uint64_t mask = b4 - (uint64_t)1U;
+  uint64_t mask = b4 - 1ULL;
   uint64_t z03 = s0 ^ (mask & (s0 ^ t01));
   uint64_t z13 = s1 ^ (mask & (s1 ^ t11));
   uint64_t z23 = s2 ^ (mask & (s2 ^ t21));
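Note: barrett_reduction above reduces a wide, ten-limb (radix 2^56) value modulo the Ed25519 group order. The first block of constants decodes to L = 2^252 + 27742317777372353535851937790883648493 and the second to what appears to be mu = floor(2^512 / L); the shift amounts 248 and 264 come from the 56-bit limbs (4*56 + 24 and 4*56 + 40). The structure is standard Barrett reduction:

/* Barrett reduction, as read off the limb code above:
 *   q = ((t >> 248) * mu) >> 264            quotient estimate
 *   r = (t mod 2^264) - (q * L mod 2^264)   remainder candidate
 *   r = r - (mask(r >= L) & L)              one masked, constant-time subtraction
 * so z = t mod L is produced without branching on secret data.
 */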
@@ -1008,72 +980,48 @@ static inline void mul_modq(uint64_t *out, uint64_t *x, uint64_t *y)
   FStar_UInt128_uint128 z60 = FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy24, xy33), xy42);
   FStar_UInt128_uint128 z70 = FStar_UInt128_add_mod(xy34, xy43);
   FStar_UInt128_uint128 z80 = xy44;
-  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z00, (uint32_t)56U);
-  uint64_t t10 = FStar_UInt128_uint128_to_uint64(z00) & (uint64_t)0xffffffffffffffU;
+  FStar_UInt128_uint128 carry0 = FStar_UInt128_shift_right(z00, 56U);
+  uint64_t t10 = FStar_UInt128_uint128_to_uint64(z00) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c0 = carry0;
   uint64_t t0 = t10;
-  FStar_UInt128_uint128
-  carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z10, c0), (uint32_t)56U);
+  FStar_UInt128_uint128 carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z10, c0), 56U);
   uint64_t
-  t11 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z10, c0))
-    & (uint64_t)0xffffffffffffffU;
+  t11 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z10, c0)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c1 = carry1;
   uint64_t t1 = t11;
-  FStar_UInt128_uint128
-  carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z20, c1), (uint32_t)56U);
+  FStar_UInt128_uint128 carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z20, c1), 56U);
   uint64_t
-  t12 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z20, c1))
-    & (uint64_t)0xffffffffffffffU;
+  t12 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z20, c1)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c2 = carry2;
   uint64_t t2 = t12;
-  FStar_UInt128_uint128
-  carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z30, c2), (uint32_t)56U);
+  FStar_UInt128_uint128 carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z30, c2), 56U);
   uint64_t
-  t13 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z30, c2))
-    & (uint64_t)0xffffffffffffffU;
+  t13 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z30, c2)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c3 = carry3;
   uint64_t t3 = t13;
-  FStar_UInt128_uint128
-  carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z40, c3), (uint32_t)56U);
+  FStar_UInt128_uint128 carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z40, c3), 56U);
   uint64_t
-  t14 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z40, c3))
-    & (uint64_t)0xffffffffffffffU;
+  t14 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z40, c3)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c4 = carry4;
   uint64_t t4 = t14;
-  FStar_UInt128_uint128
-  carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z50, c4), (uint32_t)56U);
+  FStar_UInt128_uint128 carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z50, c4), 56U);
   uint64_t
-  t15 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z50, c4))
-    & (uint64_t)0xffffffffffffffU;
+  t15 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z50, c4)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c5 = carry5;
   uint64_t t5 = t15;
-  FStar_UInt128_uint128
-  carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z60, c5), (uint32_t)56U);
+  FStar_UInt128_uint128 carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z60, c5), 56U);
   uint64_t
-  t16 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z60, c5))
-    & (uint64_t)0xffffffffffffffU;
+  t16 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z60, c5)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c6 = carry6;
   uint64_t t6 = t16;
-  FStar_UInt128_uint128
-  carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z70, c6), (uint32_t)56U);
+  FStar_UInt128_uint128 carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z70, c6), 56U);
   uint64_t
-  t17 =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z70, c6))
-    & (uint64_t)0xffffffffffffffU;
+  t17 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z70, c6)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c7 = carry7;
   uint64_t t7 = t17;
-  FStar_UInt128_uint128
-  carry = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z80, c7), (uint32_t)56U);
+  FStar_UInt128_uint128 carry = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z80, c7), 56U);
   uint64_t
-  t =
-    FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z80, c7))
-    & (uint64_t)0xffffffffffffffU;
+  t = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z80, c7)) & 0xffffffffffffffULL;
   FStar_UInt128_uint128 c8 = carry;
   uint64_t t8 = t;
   uint64_t t9 = FStar_UInt128_uint128_to_uint64(c8);
@@ -1112,54 +1060,54 @@ static inline void add_modq(uint64_t *out, uint64_t *x, uint64_t *y)
   uint64_t y2 = y[2U];
   uint64_t y3 = y[3U];
   uint64_t y4 = y[4U];
-  uint64_t carry0 = (x0 + y0) >> (uint32_t)56U;
-  uint64_t t0 = (x0 + y0) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry0 = (x0 + y0) >> 56U;
+  uint64_t t0 = (x0 + y0) & 0xffffffffffffffULL;
   uint64_t t00 = t0;
   uint64_t c0 = carry0;
-  uint64_t carry1 = (x1 + y1 + c0) >> (uint32_t)56U;
-  uint64_t t1 = (x1 + y1 + c0) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry1 = (x1 + y1 + c0) >> 56U;
+  uint64_t t1 = (x1 + y1 + c0) & 0xffffffffffffffULL;
   uint64_t t10 = t1;
   uint64_t c1 = carry1;
-  uint64_t carry2 = (x2 + y2 + c1) >> (uint32_t)56U;
-  uint64_t t2 = (x2 + y2 + c1) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry2 = (x2 + y2 + c1) >> 56U;
+  uint64_t t2 = (x2 + y2 + c1) & 0xffffffffffffffULL;
   uint64_t t20 = t2;
   uint64_t c2 = carry2;
-  uint64_t carry = (x3 + y3 + c2) >> (uint32_t)56U;
-  uint64_t t3 = (x3 + y3 + c2) & (uint64_t)0xffffffffffffffU;
+  uint64_t carry = (x3 + y3 + c2) >> 56U;
+  uint64_t t3 = (x3 + y3 + c2) & 0xffffffffffffffULL;
   uint64_t t30 = t3;
   uint64_t c3 = carry;
   uint64_t t4 = x4 + y4 + c3;
-  uint64_t m0 = (uint64_t)0x12631a5cf5d3edU;
-  uint64_t m1 = (uint64_t)0xf9dea2f79cd658U;
-  uint64_t m2 = (uint64_t)0x000000000014deU;
-  uint64_t m3 = (uint64_t)0x00000000000000U;
-  uint64_t m4 = (uint64_t)0x00000010000000U;
+  uint64_t m0 = 0x12631a5cf5d3edULL;
+  uint64_t m1 = 0xf9dea2f79cd658ULL;
+  uint64_t m2 = 0x000000000014deULL;
+  uint64_t m3 = 0x00000000000000ULL;
+  uint64_t m4 = 0x00000010000000ULL;
   uint64_t y01 = m0;
   uint64_t y11 = m1;
   uint64_t y21 = m2;
   uint64_t y31 = m3;
   uint64_t y41 = m4;
-  uint64_t b5 = (t00 - y01) >> (uint32_t)63U;
-  uint64_t t5 = (b5 << (uint32_t)56U) + t00 - y01;
+  uint64_t b5 = (t00 - y01) >> 63U;
+  uint64_t t5 = (b5 << 56U) + t00 - y01;
   uint64_t b0 = b5;
   uint64_t t01 = t5;
-  uint64_t b6 = (t10 - (y11 + b0)) >> (uint32_t)63U;
-  uint64_t t6 = (b6 << (uint32_t)56U) + t10 - (y11 + b0);
+  uint64_t b6 = (t10 - (y11 + b0)) >> 63U;
+  uint64_t t6 = (b6 << 56U) + t10 - (y11 + b0);
   uint64_t b1 = b6;
   uint64_t t11 = t6;
-  uint64_t b7 = (t20 - (y21 + b1)) >> (uint32_t)63U;
-  uint64_t t7 = (b7 << (uint32_t)56U) + t20 - (y21 + b1);
+  uint64_t b7 = (t20 - (y21 + b1)) >> 63U;
+  uint64_t t7 = (b7 << 56U) + t20 - (y21 + b1);
   uint64_t b2 = b7;
   uint64_t t21 = t7;
-  uint64_t b8 = (t30 - (y31 + b2)) >> (uint32_t)63U;
-  uint64_t t8 = (b8 << (uint32_t)56U) + t30 - (y31 + b2);
+  uint64_t b8 = (t30 - (y31 + b2)) >> 63U;
+  uint64_t t8 = (b8 << 56U) + t30 - (y31 + b2);
   uint64_t b3 = b8;
   uint64_t t31 = t8;
-  uint64_t b = (t4 - (y41 + b3)) >> (uint32_t)63U;
-  uint64_t t = (b << (uint32_t)56U) + t4 - (y41 + b3);
+  uint64_t b = (t4 - (y41 + b3)) >> 63U;
+  uint64_t t = (b << 56U) + t4 - (y41 + b3);
   uint64_t b4 = b;
   uint64_t t41 = t;
-  uint64_t mask = b4 - (uint64_t)1U;
+  uint64_t mask = b4 - 1ULL;
   uint64_t z00 = t00 ^ (mask & (t00 ^ t01));
   uint64_t z10 = t10 ^ (mask & (t10 ^ t11));
   uint64_t z20 = t20 ^ (mask & (t20 ^ t21));
@@ -1194,35 +1142,35 @@ static inline bool gte_q(uint64_t *s)
   uint64_t s2 = s[2U];
   uint64_t s3 = s[3U];
   uint64_t s4 = s[4U];
-  if (s4 > (uint64_t)0x00000010000000U)
+  if (s4 > 0x00000010000000ULL)
   {
     return true;
   }
-  if (s4 < (uint64_t)0x00000010000000U)
+  if (s4 < 0x00000010000000ULL)
   {
     return false;
   }
-  if (s3 > (uint64_t)0x00000000000000U)
+  if (s3 > 0x00000000000000ULL)
   {
     return true;
   }
-  if (s2 > (uint64_t)0x000000000014deU)
+  if (s2 > 0x000000000014deULL)
   {
     return true;
   }
-  if (s2 < (uint64_t)0x000000000014deU)
+  if (s2 < 0x000000000014deULL)
   {
     return false;
   }
-  if (s1 > (uint64_t)0xf9dea2f79cd658U)
+  if (s1 > 0xf9dea2f79cd658ULL)
   {
     return true;
   }
-  if (s1 < (uint64_t)0xf9dea2f79cd658U)
+  if (s1 < 0xf9dea2f79cd658ULL)
   {
     return false;
   }
-  if (s0 >= (uint64_t)0x12631a5cf5d3edU)
+  if (s0 >= 0x12631a5cf5d3edULL)
   {
     return true;
   }
@@ -1248,19 +1196,19 @@ bool Hacl_Impl_Ed25519_PointEqual_point_equal(uint64_t *p, uint64_t *q)
 {
   uint64_t tmp[20U] = { 0U };
   uint64_t *pxqz = tmp;
-  uint64_t *qxpz = tmp + (uint32_t)5U;
-  fmul0(pxqz, p, q + (uint32_t)10U);
+  uint64_t *qxpz = tmp + 5U;
+  fmul0(pxqz, p, q + 10U);
   reduce(pxqz);
-  fmul0(qxpz, q, p + (uint32_t)10U);
+  fmul0(qxpz, q, p + 10U);
   reduce(qxpz);
   bool b = eq(pxqz, qxpz);
   if (b)
   {
-    uint64_t *pyqz = tmp + (uint32_t)10U;
-    uint64_t *qypz = tmp + (uint32_t)15U;
-    fmul0(pyqz, p + (uint32_t)5U, q + (uint32_t)10U);
+    uint64_t *pyqz = tmp + 10U;
+    uint64_t *qypz = tmp + 15U;
+    fmul0(pyqz, p + 5U, q + 10U);
     reduce(pyqz);
-    fmul0(qypz, q + (uint32_t)5U, p + (uint32_t)10U);
+    fmul0(qypz, q + 5U, p + 10U);
     reduce(qypz);
     return eq(pyqz, qypz);
   }
@@ -1270,23 +1218,23 @@ bool Hacl_Impl_Ed25519_PointEqual_point_equal(uint64_t *p, uint64_t *q)
 void Hacl_Impl_Ed25519_PointNegate_point_negate(uint64_t *p, uint64_t *out)
 {
   uint64_t zero[5U] = { 0U };
-  zero[0U] = (uint64_t)0U;
-  zero[1U] = (uint64_t)0U;
-  zero[2U] = (uint64_t)0U;
-  zero[3U] = (uint64_t)0U;
-  zero[4U] = (uint64_t)0U;
+  zero[0U] = 0ULL;
+  zero[1U] = 0ULL;
+  zero[2U] = 0ULL;
+  zero[3U] = 0ULL;
+  zero[4U] = 0ULL;
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)5U;
-  uint64_t *z = p + (uint32_t)10U;
-  uint64_t *t = p + (uint32_t)15U;
+  uint64_t *y = p + 5U;
+  uint64_t *z = p + 10U;
+  uint64_t *t = p + 15U;
   uint64_t *x1 = out;
-  uint64_t *y1 = out + (uint32_t)5U;
-  uint64_t *z1 = out + (uint32_t)10U;
-  uint64_t *t1 = out + (uint32_t)15U;
+  uint64_t *y1 = out + 5U;
+  uint64_t *z1 = out + 10U;
+  uint64_t *t1 = out + 15U;
   fdifference(x1, zero, x);
   Hacl_Bignum25519_reduce_513(x1);
-  memcpy(y1, y, (uint32_t)5U * sizeof (uint64_t));
-  memcpy(z1, z, (uint32_t)5U * sizeof (uint64_t));
+  memcpy(y1, y, 5U * sizeof (uint64_t));
+  memcpy(z1, z, 5U * sizeof (uint64_t));
   fdifference(t1, zero, t);
   Hacl_Bignum25519_reduce_513(t1);
 }
@@ -1295,11 +1243,11 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t
 {
   uint64_t bscalar[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar;
-    uint8_t *bj = scalar + i * (uint32_t)8U;
+    uint8_t *bj = scalar + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -1307,42 +1255,34 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t
   uint64_t table[320U] = { 0U };
   uint64_t tmp[20U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)20U;
+  uint64_t *t1 = table + 20U;
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(t0);
-  memcpy(t1, q, (uint32_t)20U * sizeof (uint64_t));
+  memcpy(t1, q, 20U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)20U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 20U;
     Hacl_Impl_Ed25519_PointDouble_point_double(tmp, t11);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U,
-      tmp,
-      (uint32_t)20U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U;
+    memcpy(table + (2U * i + 2U) * 20U, tmp, 20U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 20U;
     Hacl_Impl_Ed25519_PointAdd_point_add(tmp, q, t2);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)20U,
-      tmp,
-      (uint32_t)20U * sizeof (uint64_t)););
+    memcpy(table + (2U * i + 3U) * 20U, tmp, 20U * sizeof (uint64_t)););
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(out);
   uint64_t tmp0[20U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)20U * sizeof (uint64_t));
+    KRML_MAYBE_FOR4(i, 0U, 4U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
+    uint32_t k = 256U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 20U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)20U;
-      for (uint32_t i = (uint32_t)0U; i < (uint32_t)20U; i++)
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 20U;
+      for (uint32_t i = 0U; i < 20U; i++)
       {
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -1354,14 +1294,14 @@ void Hacl_Impl_Ed25519_Ladder_point_mul(uint64_t *out, uint8_t *scalar, uint64_t
 
 static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)
 {
-  memcpy(tmp, (uint64_t *)table, (uint32_t)20U * sizeof (uint64_t));
+  memcpy(tmp, (uint64_t *)table, 20U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i0,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));
-    const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)20U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)20U; i++)
+    0U,
+    15U,
+    1U,
+    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + 1U));
+    const uint64_t *res_j = table + (i0 + 1U) * 20U;
+    for (uint32_t i = 0U; i < 20U; i++)
     {
       uint64_t *os = tmp;
       uint64_t x = (c & res_j[i]) | (~c & tmp[i]);
@@ -1373,107 +1313,97 @@ static inline void point_mul_g(uint64_t *out, uint8_t *scalar)
 {
   uint64_t bscalar[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar;
-    uint8_t *bj = scalar + i * (uint32_t)8U;
+    uint8_t *bj = scalar + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   uint64_t q1[20U] = { 0U };
   uint64_t *gx = q1;
-  uint64_t *gy = q1 + (uint32_t)5U;
-  uint64_t *gz = q1 + (uint32_t)10U;
-  uint64_t *gt = q1 + (uint32_t)15U;
-  gx[0U] = (uint64_t)0x00062d608f25d51aU;
-  gx[1U] = (uint64_t)0x000412a4b4f6592aU;
-  gx[2U] = (uint64_t)0x00075b7171a4b31dU;
-  gx[3U] = (uint64_t)0x0001ff60527118feU;
-  gx[4U] = (uint64_t)0x000216936d3cd6e5U;
-  gy[0U] = (uint64_t)0x0006666666666658U;
-  gy[1U] = (uint64_t)0x0004ccccccccccccU;
-  gy[2U] = (uint64_t)0x0001999999999999U;
-  gy[3U] = (uint64_t)0x0003333333333333U;
-  gy[4U] = (uint64_t)0x0006666666666666U;
-  gz[0U] = (uint64_t)1U;
-  gz[1U] = (uint64_t)0U;
-  gz[2U] = (uint64_t)0U;
-  gz[3U] = (uint64_t)0U;
-  gz[4U] = (uint64_t)0U;
-  gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
-  gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
-  gt[2U] = (uint64_t)0x0002af8df483c27eU;
-  gt[3U] = (uint64_t)0x000332b375274732U;
-  gt[4U] = (uint64_t)0x00067875f0fd78b7U;
+  uint64_t *gy = q1 + 5U;
+  uint64_t *gz = q1 + 10U;
+  uint64_t *gt = q1 + 15U;
+  gx[0U] = 0x00062d608f25d51aULL;
+  gx[1U] = 0x000412a4b4f6592aULL;
+  gx[2U] = 0x00075b7171a4b31dULL;
+  gx[3U] = 0x0001ff60527118feULL;
+  gx[4U] = 0x000216936d3cd6e5ULL;
+  gy[0U] = 0x0006666666666658ULL;
+  gy[1U] = 0x0004ccccccccccccULL;
+  gy[2U] = 0x0001999999999999ULL;
+  gy[3U] = 0x0003333333333333ULL;
+  gy[4U] = 0x0006666666666666ULL;
+  gz[0U] = 1ULL;
+  gz[1U] = 0ULL;
+  gz[2U] = 0ULL;
+  gz[3U] = 0ULL;
+  gz[4U] = 0ULL;
+  gt[0U] = 0x00068ab3a5b7dda3ULL;
+  gt[1U] = 0x00000eea2a5eadbbULL;
+  gt[2U] = 0x0002af8df483c27eULL;
+  gt[3U] = 0x000332b375274732ULL;
+  gt[4U] = 0x00067875f0fd78b7ULL;
   uint64_t
   q2[20U] =
     {
-      (uint64_t)13559344787725U, (uint64_t)2051621493703448U, (uint64_t)1947659315640708U,
-      (uint64_t)626856790370168U, (uint64_t)1592804284034836U, (uint64_t)1781728767459187U,
-      (uint64_t)278818420518009U, (uint64_t)2038030359908351U, (uint64_t)910625973862690U,
-      (uint64_t)471887343142239U, (uint64_t)1298543306606048U, (uint64_t)794147365642417U,
-      (uint64_t)129968992326749U, (uint64_t)523140861678572U, (uint64_t)1166419653909231U,
-      (uint64_t)2009637196928390U, (uint64_t)1288020222395193U, (uint64_t)1007046974985829U,
-      (uint64_t)208981102651386U, (uint64_t)2074009315253380U
+      13559344787725ULL, 2051621493703448ULL, 1947659315640708ULL, 626856790370168ULL,
+      1592804284034836ULL, 1781728767459187ULL, 278818420518009ULL, 2038030359908351ULL,
+      910625973862690ULL, 471887343142239ULL, 1298543306606048ULL, 794147365642417ULL,
+      129968992326749ULL, 523140861678572ULL, 1166419653909231ULL, 2009637196928390ULL,
+      1288020222395193ULL, 1007046974985829ULL, 208981102651386ULL, 2074009315253380ULL
     };
   uint64_t
   q3[20U] =
     {
-      (uint64_t)557549315715710U, (uint64_t)196756086293855U, (uint64_t)846062225082495U,
-      (uint64_t)1865068224838092U, (uint64_t)991112090754908U, (uint64_t)522916421512828U,
-      (uint64_t)2098523346722375U, (uint64_t)1135633221747012U, (uint64_t)858420432114866U,
-      (uint64_t)186358544306082U, (uint64_t)1044420411868480U, (uint64_t)2080052304349321U,
-      (uint64_t)557301814716724U, (uint64_t)1305130257814057U, (uint64_t)2126012765451197U,
-      (uint64_t)1441004402875101U, (uint64_t)353948968859203U, (uint64_t)470765987164835U,
-      (uint64_t)1507675957683570U, (uint64_t)1086650358745097U
+      557549315715710ULL, 196756086293855ULL, 846062225082495ULL, 1865068224838092ULL,
+      991112090754908ULL, 522916421512828ULL, 2098523346722375ULL, 1135633221747012ULL,
+      858420432114866ULL, 186358544306082ULL, 1044420411868480ULL, 2080052304349321ULL,
+      557301814716724ULL, 1305130257814057ULL, 2126012765451197ULL, 1441004402875101ULL,
+      353948968859203ULL, 470765987164835ULL, 1507675957683570ULL, 1086650358745097ULL
     };
   uint64_t
   q4[20U] =
     {
-      (uint64_t)1129953239743101U, (uint64_t)1240339163956160U, (uint64_t)61002583352401U,
-      (uint64_t)2017604552196030U, (uint64_t)1576867829229863U, (uint64_t)1508654942849389U,
-      (uint64_t)270111619664077U, (uint64_t)1253097517254054U, (uint64_t)721798270973250U,
-      (uint64_t)161923365415298U, (uint64_t)828530877526011U, (uint64_t)1494851059386763U,
-      (uint64_t)662034171193976U, (uint64_t)1315349646974670U, (uint64_t)2199229517308806U,
-      (uint64_t)497078277852673U, (uint64_t)1310507715989956U, (uint64_t)1881315714002105U,
-      (uint64_t)2214039404983803U, (uint64_t)1331036420272667U
+      1129953239743101ULL, 1240339163956160ULL, 61002583352401ULL, 2017604552196030ULL,
+      1576867829229863ULL, 1508654942849389ULL, 270111619664077ULL, 1253097517254054ULL,
+      721798270973250ULL, 161923365415298ULL, 828530877526011ULL, 1494851059386763ULL,
+      662034171193976ULL, 1315349646974670ULL, 2199229517308806ULL, 497078277852673ULL,
+      1310507715989956ULL, 1881315714002105ULL, 2214039404983803ULL, 1331036420272667ULL
     };
   uint64_t *r1 = bscalar;
-  uint64_t *r2 = bscalar + (uint32_t)1U;
-  uint64_t *r3 = bscalar + (uint32_t)2U;
-  uint64_t *r4 = bscalar + (uint32_t)3U;
+  uint64_t *r2 = bscalar + 1U;
+  uint64_t *r3 = bscalar + 2U;
+  uint64_t *r4 = bscalar + 3U;
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(out);
   uint64_t tmp[20U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);
+    0U,
+    16U,
+    1U,
+    KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
+    uint32_t k = 64U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
-    uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);
+    uint32_t k0 = 64U - 4U * i - 4U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
-    uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);
+    uint32_t k1 = 64U - 4U * i - 4U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp);
-    uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);
+    uint32_t k2 = 64U - 4U * i - 4U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U);
     precomp_get_consttime(Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp););
-  KRML_HOST_IGNORE(q2);
-  KRML_HOST_IGNORE(q3);
-  KRML_HOST_IGNORE(q4);
+  KRML_MAYBE_UNUSED_VAR(q2);
+  KRML_MAYBE_UNUSED_VAR(q3);
+  KRML_MAYBE_UNUSED_VAR(q4);
 }
 
 static inline void
@@ -1481,48 +1411,48 @@ point_mul_g_double_vartime(uint64_t *out, uint8_t *scalar1, uint8_t *scalar2, ui
 {
   uint64_t tmp[28U] = { 0U };
   uint64_t *g = tmp;
-  uint64_t *bscalar1 = tmp + (uint32_t)20U;
-  uint64_t *bscalar2 = tmp + (uint32_t)24U;
+  uint64_t *bscalar1 = tmp + 20U;
+  uint64_t *bscalar2 = tmp + 24U;
   uint64_t *gx = g;
-  uint64_t *gy = g + (uint32_t)5U;
-  uint64_t *gz = g + (uint32_t)10U;
-  uint64_t *gt = g + (uint32_t)15U;
-  gx[0U] = (uint64_t)0x00062d608f25d51aU;
-  gx[1U] = (uint64_t)0x000412a4b4f6592aU;
-  gx[2U] = (uint64_t)0x00075b7171a4b31dU;
-  gx[3U] = (uint64_t)0x0001ff60527118feU;
-  gx[4U] = (uint64_t)0x000216936d3cd6e5U;
-  gy[0U] = (uint64_t)0x0006666666666658U;
-  gy[1U] = (uint64_t)0x0004ccccccccccccU;
-  gy[2U] = (uint64_t)0x0001999999999999U;
-  gy[3U] = (uint64_t)0x0003333333333333U;
-  gy[4U] = (uint64_t)0x0006666666666666U;
-  gz[0U] = (uint64_t)1U;
-  gz[1U] = (uint64_t)0U;
-  gz[2U] = (uint64_t)0U;
-  gz[3U] = (uint64_t)0U;
-  gz[4U] = (uint64_t)0U;
-  gt[0U] = (uint64_t)0x00068ab3a5b7dda3U;
-  gt[1U] = (uint64_t)0x00000eea2a5eadbbU;
-  gt[2U] = (uint64_t)0x0002af8df483c27eU;
-  gt[3U] = (uint64_t)0x000332b375274732U;
-  gt[4U] = (uint64_t)0x00067875f0fd78b7U;
+  uint64_t *gy = g + 5U;
+  uint64_t *gz = g + 10U;
+  uint64_t *gt = g + 15U;
+  gx[0U] = 0x00062d608f25d51aULL;
+  gx[1U] = 0x000412a4b4f6592aULL;
+  gx[2U] = 0x00075b7171a4b31dULL;
+  gx[3U] = 0x0001ff60527118feULL;
+  gx[4U] = 0x000216936d3cd6e5ULL;
+  gy[0U] = 0x0006666666666658ULL;
+  gy[1U] = 0x0004ccccccccccccULL;
+  gy[2U] = 0x0001999999999999ULL;
+  gy[3U] = 0x0003333333333333ULL;
+  gy[4U] = 0x0006666666666666ULL;
+  gz[0U] = 1ULL;
+  gz[1U] = 0ULL;
+  gz[2U] = 0ULL;
+  gz[3U] = 0ULL;
+  gz[4U] = 0ULL;
+  gt[0U] = 0x00068ab3a5b7dda3ULL;
+  gt[1U] = 0x00000eea2a5eadbbULL;
+  gt[2U] = 0x0002af8df483c27eULL;
+  gt[3U] = 0x000332b375274732ULL;
+  gt[4U] = 0x00067875f0fd78b7ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar1;
-    uint8_t *bj = scalar1 + i * (uint32_t)8U;
+    uint8_t *bj = scalar1 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = bscalar2;
-    uint8_t *bj = scalar2 + i * (uint32_t)8U;
+    uint8_t *bj = scalar2 + i * 8U;
     uint64_t u = load64_le(bj);
     uint64_t r = u;
     uint64_t x = r;
@@ -1530,58 +1460,50 @@ point_mul_g_double_vartime(uint64_t *out, uint8_t *scalar1, uint8_t *scalar2, ui
   uint64_t table2[640U] = { 0U };
   uint64_t tmp1[20U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)20U;
+  uint64_t *t1 = table2 + 20U;
   Hacl_Impl_Ed25519_PointConstants_make_point_inf(t0);
-  memcpy(t1, q2, (uint32_t)20U * sizeof (uint64_t));
+  memcpy(t1, q2, 20U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)20U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 20U;
     Hacl_Impl_Ed25519_PointDouble_point_double(tmp1, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U,
-      tmp1,
-      (uint32_t)20U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)20U;
+    memcpy(table2 + (2U * i + 2U) * 20U, tmp1, 20U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 20U;
     Hacl_Impl_Ed25519_PointAdd_point_add(tmp1, q2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)20U,
-      tmp1,
-      (uint32_t)20U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 20U, tmp1, 20U * sizeof (uint64_t)););
   uint64_t tmp10[20U] = { 0U };
-  uint32_t i0 = (uint32_t)255U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar1, i0, (uint32_t)5U);
+  uint32_t i0 = 255U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
   const
   uint64_t
-  *a_bits_l = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)20U;
-  memcpy(out, (uint64_t *)a_bits_l, (uint32_t)20U * sizeof (uint64_t));
-  uint32_t i1 = (uint32_t)255U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar2, i1, (uint32_t)5U);
+  *a_bits_l = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 20U;
+  memcpy(out, (uint64_t *)a_bits_l, 20U * sizeof (uint64_t));
+  uint32_t i1 = 255U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
-  const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)20U;
-  memcpy(tmp10, (uint64_t *)a_bits_l0, (uint32_t)20U * sizeof (uint64_t));
+  const uint64_t *a_bits_l0 = table2 + bits_l320 * 20U;
+  memcpy(tmp10, (uint64_t *)a_bits_l0, 20U * sizeof (uint64_t));
   Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp10);
   uint64_t tmp11[20U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++)
+  for (uint32_t i = 0U; i < 51U; i++)
   {
-    KRML_MAYBE_FOR5(i2,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar2, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, Hacl_Impl_Ed25519_PointDouble_point_double(out, out););
+    uint32_t k = 255U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar2, k, 5U);
     uint32_t bits_l321 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)20U;
-    memcpy(tmp11, (uint64_t *)a_bits_l1, (uint32_t)20U * sizeof (uint64_t));
+    const uint64_t *a_bits_l1 = table2 + bits_l321 * 20U;
+    memcpy(tmp11, (uint64_t *)a_bits_l1, 20U * sizeof (uint64_t));
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp11);
-    uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, bscalar1, k0, (uint32_t)5U);
+    uint32_t k0 = 255U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, bscalar1, k0, 5U);
     uint32_t bits_l322 = (uint32_t)bits_l0;
     const
     uint64_t
-    *a_bits_l2 = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)20U;
-    memcpy(tmp11, (uint64_t *)a_bits_l2, (uint32_t)20U * sizeof (uint64_t));
+    *a_bits_l2 = Hacl_Ed25519_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 20U;
+    memcpy(tmp11, (uint64_t *)a_bits_l2, 20U * sizeof (uint64_t));
     Hacl_Impl_Ed25519_PointAdd_point_add(out, out, tmp11);
   }
 }
@@ -1609,13 +1531,13 @@ static inline void store_56(uint8_t *out, uint64_t *b)
   uint32_t b4_ = (uint32_t)b4;
   uint8_t *b8 = out;
   store64_le(b8, b0);
-  uint8_t *b80 = out + (uint32_t)7U;
+  uint8_t *b80 = out + 7U;
   store64_le(b80, b1);
-  uint8_t *b81 = out + (uint32_t)14U;
+  uint8_t *b81 = out + 14U;
   store64_le(b81, b2);
-  uint8_t *b82 = out + (uint32_t)21U;
+  uint8_t *b82 = out + 21U;
   store64_le(b82, b3);
-  store32_le(out + (uint32_t)28U, b4_);
+  store32_le(out + 28U, b4_);
 }
 
 static inline void load_64_bytes(uint64_t *out, uint8_t *b)
@@ -1623,39 +1545,39 @@ static inline void load_64_bytes(uint64_t *out, uint8_t *b)
   uint8_t *b80 = b;
   uint64_t u = load64_le(b80);
   uint64_t z = u;
-  uint64_t b0 = z & (uint64_t)0xffffffffffffffU;
-  uint8_t *b81 = b + (uint32_t)7U;
+  uint64_t b0 = z & 0xffffffffffffffULL;
+  uint8_t *b81 = b + 7U;
   uint64_t u0 = load64_le(b81);
   uint64_t z0 = u0;
-  uint64_t b1 = z0 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b82 = b + (uint32_t)14U;
+  uint64_t b1 = z0 & 0xffffffffffffffULL;
+  uint8_t *b82 = b + 14U;
   uint64_t u1 = load64_le(b82);
   uint64_t z1 = u1;
-  uint64_t b2 = z1 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b83 = b + (uint32_t)21U;
+  uint64_t b2 = z1 & 0xffffffffffffffULL;
+  uint8_t *b83 = b + 21U;
   uint64_t u2 = load64_le(b83);
   uint64_t z2 = u2;
-  uint64_t b3 = z2 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b84 = b + (uint32_t)28U;
+  uint64_t b3 = z2 & 0xffffffffffffffULL;
+  uint8_t *b84 = b + 28U;
   uint64_t u3 = load64_le(b84);
   uint64_t z3 = u3;
-  uint64_t b4 = z3 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b85 = b + (uint32_t)35U;
+  uint64_t b4 = z3 & 0xffffffffffffffULL;
+  uint8_t *b85 = b + 35U;
   uint64_t u4 = load64_le(b85);
   uint64_t z4 = u4;
-  uint64_t b5 = z4 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b86 = b + (uint32_t)42U;
+  uint64_t b5 = z4 & 0xffffffffffffffULL;
+  uint8_t *b86 = b + 42U;
   uint64_t u5 = load64_le(b86);
   uint64_t z5 = u5;
-  uint64_t b6 = z5 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b87 = b + (uint32_t)49U;
+  uint64_t b6 = z5 & 0xffffffffffffffULL;
+  uint8_t *b87 = b + 49U;
   uint64_t u6 = load64_le(b87);
   uint64_t z6 = u6;
-  uint64_t b7 = z6 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b8 = b + (uint32_t)56U;
+  uint64_t b7 = z6 & 0xffffffffffffffULL;
+  uint8_t *b8 = b + 56U;
   uint64_t u7 = load64_le(b8);
   uint64_t z7 = u7;
-  uint64_t b88 = z7 & (uint64_t)0xffffffffffffffU;
+  uint64_t b88 = z7 & 0xffffffffffffffULL;
   uint8_t b63 = b[63U];
   uint64_t b9 = (uint64_t)b63;
   out[0U] = b0;
@@ -1675,20 +1597,20 @@ static inline void load_32_bytes(uint64_t *out, uint8_t *b)
   uint8_t *b80 = b;
   uint64_t u0 = load64_le(b80);
   uint64_t z = u0;
-  uint64_t b0 = z & (uint64_t)0xffffffffffffffU;
-  uint8_t *b81 = b + (uint32_t)7U;
+  uint64_t b0 = z & 0xffffffffffffffULL;
+  uint8_t *b81 = b + 7U;
   uint64_t u1 = load64_le(b81);
   uint64_t z0 = u1;
-  uint64_t b1 = z0 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b82 = b + (uint32_t)14U;
+  uint64_t b1 = z0 & 0xffffffffffffffULL;
+  uint8_t *b82 = b + 14U;
   uint64_t u2 = load64_le(b82);
   uint64_t z1 = u2;
-  uint64_t b2 = z1 & (uint64_t)0xffffffffffffffU;
-  uint8_t *b8 = b + (uint32_t)21U;
+  uint64_t b2 = z1 & 0xffffffffffffffULL;
+  uint8_t *b8 = b + 21U;
   uint64_t u3 = load64_le(b8);
   uint64_t z2 = u3;
-  uint64_t b3 = z2 & (uint64_t)0xffffffffffffffU;
-  uint32_t u = load32_le(b + (uint32_t)28U);
+  uint64_t b3 = z2 & 0xffffffffffffffULL;
+  uint32_t u = load32_le(b + 28U);
   uint32_t b4 = u;
   uint64_t b41 = (uint64_t)b4;
   out[0U] = b0;
@@ -1703,16 +1625,15 @@ static inline void sha512_pre_msg(uint8_t *hash, uint8_t *prefix, uint32_t len,
   uint8_t buf[128U] = { 0U };
   uint64_t block_state[8U] = { 0U };
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64 p = s;
-  Hacl_SHA2_Scalar32_sha512_init(block_state);
+  Hacl_Hash_SHA2_sha512_init(block_state);
   Hacl_Streaming_MD_state_64 *st = &p;
-  Hacl_Streaming_Types_error_code
-  err0 = Hacl_Streaming_SHA2_update_512(st, prefix, (uint32_t)32U);
-  Hacl_Streaming_Types_error_code err1 = Hacl_Streaming_SHA2_update_512(st, input, len);
-  KRML_HOST_IGNORE(err0);
-  KRML_HOST_IGNORE(err1);
-  Hacl_Streaming_SHA2_finish_512(st, hash);
+  Hacl_Streaming_Types_error_code err0 = Hacl_Hash_SHA2_update_512(st, prefix, 32U);
+  Hacl_Streaming_Types_error_code err1 = Hacl_Hash_SHA2_update_512(st, input, len);
+  KRML_MAYBE_UNUSED_VAR(err0);
+  KRML_MAYBE_UNUSED_VAR(err1);
+  Hacl_Hash_SHA2_digest_512(st, hash);
 }
 
 static inline void
@@ -1727,19 +1648,17 @@ sha512_pre_pre2_msg(
   uint8_t buf[128U] = { 0U };
   uint64_t block_state[8U] = { 0U };
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64 p = s;
-  Hacl_SHA2_Scalar32_sha512_init(block_state);
+  Hacl_Hash_SHA2_sha512_init(block_state);
   Hacl_Streaming_MD_state_64 *st = &p;
-  Hacl_Streaming_Types_error_code
-  err0 = Hacl_Streaming_SHA2_update_512(st, prefix, (uint32_t)32U);
-  Hacl_Streaming_Types_error_code
-  err1 = Hacl_Streaming_SHA2_update_512(st, prefix2, (uint32_t)32U);
-  Hacl_Streaming_Types_error_code err2 = Hacl_Streaming_SHA2_update_512(st, input, len);
-  KRML_HOST_IGNORE(err0);
-  KRML_HOST_IGNORE(err1);
-  KRML_HOST_IGNORE(err2);
-  Hacl_Streaming_SHA2_finish_512(st, hash);
+  Hacl_Streaming_Types_error_code err0 = Hacl_Hash_SHA2_update_512(st, prefix, 32U);
+  Hacl_Streaming_Types_error_code err1 = Hacl_Hash_SHA2_update_512(st, prefix2, 32U);
+  Hacl_Streaming_Types_error_code err2 = Hacl_Hash_SHA2_update_512(st, input, len);
+  KRML_MAYBE_UNUSED_VAR(err0);
+  KRML_MAYBE_UNUSED_VAR(err1);
+  KRML_MAYBE_UNUSED_VAR(err2);
+  Hacl_Hash_SHA2_digest_512(st, hash);
 }
 
 static inline void
@@ -1777,12 +1696,12 @@ static inline void point_mul_g_compress(uint8_t *out, uint8_t *s)
 
 static inline void secret_expand(uint8_t *expanded, uint8_t *secret)
 {
-  Hacl_Streaming_SHA2_hash_512(secret, (uint32_t)32U, expanded);
+  Hacl_Hash_SHA2_hash_512(expanded, secret, 32U);
   uint8_t *h_low = expanded;
   uint8_t h_low0 = h_low[0U];
   uint8_t h_low31 = h_low[31U];
-  h_low[0U] = h_low0 & (uint8_t)0xf8U;
-  h_low[31U] = (h_low31 & (uint8_t)127U) | (uint8_t)64U;
+  h_low[0U] = (uint32_t)h_low0 & 0xf8U;
+  h_low[31U] = ((uint32_t)h_low31 & 127U) | 64U;
 }
 
 /********************************************************************************
@@ -1816,8 +1735,8 @@ Compute the expanded keys for an Ed25519 signature.
 void Hacl_Ed25519_expand_keys(uint8_t *expanded_keys, uint8_t *private_key)
 {
   uint8_t *public_key = expanded_keys;
-  uint8_t *s_prefix = expanded_keys + (uint32_t)32U;
-  uint8_t *s = expanded_keys + (uint32_t)32U;
+  uint8_t *s_prefix = expanded_keys + 32U;
+  uint8_t *s = expanded_keys + 32U;
   secret_expand(s_prefix, private_key);
   point_mul_g_compress(public_key, s);
 }
@@ -1843,13 +1762,13 @@ Hacl_Ed25519_sign_expanded(
 )
 {
   uint8_t *rs = signature;
-  uint8_t *ss = signature + (uint32_t)32U;
+  uint8_t *ss = signature + 32U;
   uint64_t rq[5U] = { 0U };
   uint64_t hq[5U] = { 0U };
   uint8_t rb[32U] = { 0U };
   uint8_t *public_key = expanded_keys;
-  uint8_t *s = expanded_keys + (uint32_t)32U;
-  uint8_t *prefix = expanded_keys + (uint32_t)64U;
+  uint8_t *s = expanded_keys + 32U;
+  uint8_t *prefix = expanded_keys + 64U;
   sha512_modq_pre(rq, prefix, msg_len, msg);
   store_56(rb, rq);
   point_mul_g_compress(rs, rb);
@@ -1904,7 +1823,7 @@ Hacl_Ed25519_verify(uint8_t *public_key, uint32_t msg_len, uint8_t *msg, uint8_t
     {
       uint8_t hb[32U] = { 0U };
       uint8_t *rs1 = signature;
-      uint8_t *sb = signature + (uint32_t)32U;
+      uint8_t *sb = signature + 32U;
       uint64_t tmp[5U] = { 0U };
       load_32_bytes(tmp, sb);
       bool b1 = gte_q(tmp);
diff --git a/src/msvc/Hacl_FFDHE.c b/src/msvc/Hacl_FFDHE.c
index bc77dbdc..7f28cda0 100644
--- a/src/msvc/Hacl_FFDHE.c
+++ b/src/msvc/Hacl_FFDHE.c
@@ -35,23 +35,23 @@ static inline uint32_t ffdhe_len(Spec_FFDHE_ffdhe_alg a)
   {
     case Spec_FFDHE_FFDHE2048:
       {
-        return (uint32_t)256U;
+        return 256U;
       }
     case Spec_FFDHE_FFDHE3072:
       {
-        return (uint32_t)384U;
+        return 384U;
       }
     case Spec_FFDHE_FFDHE4096:
       {
-        return (uint32_t)512U;
+        return 512U;
       }
     case Spec_FFDHE_FFDHE6144:
       {
-        return (uint32_t)768U;
+        return 768U;
       }
     case Spec_FFDHE_FFDHE8192:
       {
-        return (uint32_t)1024U;
+        return 1024U;
       }
     default:
       {
@@ -63,7 +63,7 @@ static inline uint32_t ffdhe_len(Spec_FFDHE_ffdhe_alg a)
 
 static inline void ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a, uint64_t *p_r2_n)
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   uint64_t *p_n = p_r2_n;
   uint64_t *r2_n = p_r2_n + nLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), ffdhe_len(a));
@@ -104,88 +104,80 @@ static inline void ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a, uint64_t *p_r2_n)
       }
   }
   uint32_t len = ffdhe_len(a);
-  for (uint32_t i = (uint32_t)0U; i < len; i++)
+  for (uint32_t i = 0U; i < len; i++)
   {
     uint8_t *os = p_s;
     uint8_t x = p[i];
     os[i] = x;
   }
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ffdhe_len(a), p_s, p_n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((ffdhe_len(a) - (uint32_t)1U)
-    / (uint32_t)8U
-    + (uint32_t)1U,
-    (uint32_t)8U * ffdhe_len(a) - (uint32_t)1U,
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((ffdhe_len(a) - 1U) / 8U + 1U,
+    8U * ffdhe_len(a) - 1U,
     p_n,
     r2_n);
 }
 
 static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, uint64_t *p_n)
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *p_n1 = (uint64_t *)alloca(nLen * sizeof (uint64_t));
   memset(p_n1, 0U, nLen * sizeof (uint64_t));
-  uint64_t
-  c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, p_n[0U], (uint64_t)1U, p_n1);
-  if ((uint32_t)1U < nLen)
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, p_n[0U], 1ULL, p_n1);
+  if (1U < nLen)
   {
-    uint64_t *a1 = p_n + (uint32_t)1U;
-    uint64_t *res1 = p_n1 + (uint32_t)1U;
+    uint64_t *a1 = p_n + 1U;
+    uint64_t *res1 = p_n1 + 1U;
     uint64_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (nLen - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (nLen - 1U) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
     }
-    for
-    (uint32_t
-      i = (nLen - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < nLen - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (nLen - 1U) / 4U * 4U; i < nLen - 1U; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c1 = c;
-    KRML_HOST_IGNORE(c1);
+    KRML_MAYBE_UNUSED_VAR(c1);
   }
   else
   {
-    KRML_HOST_IGNORE(c0);
+    KRML_MAYBE_UNUSED_VAR(c0);
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *b2 = (uint64_t *)alloca(nLen * sizeof (uint64_t));
   memset(b2, 0U, nLen * sizeof (uint64_t));
-  uint32_t i0 = (uint32_t)0U;
-  uint32_t j = (uint32_t)0U;
-  b2[i0] = b2[i0] | (uint64_t)1U << j;
-  uint64_t acc0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < nLen; i++)
+  uint32_t i0 = 0U;
+  uint32_t j = 0U;
+  b2[i0] = b2[i0] | 1ULL << j;
+  uint64_t acc0 = 0ULL;
+  for (uint32_t i = 0U; i < nLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(b2[i], pk_n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(b2[i], pk_n[i]);
-    acc0 = (beq & acc0) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc0 = (beq & acc0) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t res = acc0;
   uint64_t m0 = res;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < nLen; i++)
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < nLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(pk_n[i], p_n1[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(pk_n[i], p_n1[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t m1 = acc;
   return m0 & m1;
@@ -200,21 +192,19 @@ ffdhe_compute_exp(
   uint8_t *res
 )
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   uint64_t *p_n = p_r2_n;
   uint64_t *r2_n = p_r2_n + nLen;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *res_n = (uint64_t *)alloca(nLen * sizeof (uint64_t));
   memset(res_n, 0U, nLen * sizeof (uint64_t));
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(p_n[0U]);
-  Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((ffdhe_len(a) - (uint32_t)1U)
-    / (uint32_t)8U
-    + (uint32_t)1U,
+  Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((ffdhe_len(a) - 1U) / 8U + 1U,
     p_n,
     mu,
     r2_n,
     b_n,
-    (uint32_t)64U * nLen,
+    64U * nLen,
     sk_n,
     res_n);
   Hacl_Bignum_Convert_bn_to_bytes_be_uint64(ffdhe_len(a), res_n, res);
@@ -227,7 +217,7 @@ uint32_t Hacl_FFDHE_ffdhe_len(Spec_FFDHE_ffdhe_alg a)
 
 uint64_t *Hacl_FFDHE_new_ffdhe_precomp_p(Spec_FFDHE_ffdhe_alg a)
 {
-  uint32_t nLen = (ffdhe_len(a) - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (ffdhe_len(a) - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen);
   uint64_t *res = (uint64_t *)KRML_HOST_CALLOC(nLen + nLen, sizeof (uint64_t));
   if (res == NULL)
@@ -249,17 +239,17 @@ Hacl_FFDHE_ffdhe_secret_to_public_precomp(
 )
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *g_n = (uint64_t *)alloca(nLen * sizeof (uint64_t));
   memset(g_n, 0U, nLen * sizeof (uint64_t));
-  uint8_t g = (uint8_t)0U;
+  uint8_t g = 0U;
   {
     uint8_t *os = &g;
     uint8_t x = Hacl_Impl_FFDHE_Constants_ffdhe_g2[0U];
     os[0U] = x;
   }
-  Hacl_Bignum_Convert_bn_from_bytes_be_uint64((uint32_t)1U, &g, g_n);
+  Hacl_Bignum_Convert_bn_from_bytes_be_uint64(1U, &g, g_n);
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *sk_n = (uint64_t *)alloca(nLen * sizeof (uint64_t));
   memset(sk_n, 0U, nLen * sizeof (uint64_t));
@@ -270,7 +260,7 @@ Hacl_FFDHE_ffdhe_secret_to_public_precomp(
 void Hacl_FFDHE_ffdhe_secret_to_public(Spec_FFDHE_ffdhe_alg a, uint8_t *sk, uint8_t *pk)
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen);
   uint64_t *p_r2_n = (uint64_t *)alloca((nLen + nLen) * sizeof (uint64_t));
   memset(p_r2_n, 0U, (nLen + nLen) * sizeof (uint64_t));
@@ -288,7 +278,7 @@ Hacl_FFDHE_ffdhe_shared_secret_precomp(
 )
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   uint64_t *p_n = p_r2_n;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *sk_n = (uint64_t *)alloca(nLen * sizeof (uint64_t));
@@ -299,7 +289,7 @@ Hacl_FFDHE_ffdhe_shared_secret_precomp(
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(len, sk, sk_n);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(len, pk, pk_n);
   uint64_t m = ffdhe_check_pk(a, pk_n, p_n);
-  if (m == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+  if (m == 0xFFFFFFFFFFFFFFFFULL)
   {
     ffdhe_compute_exp(a, p_r2_n, sk_n, pk_n, ss);
   }
@@ -310,7 +300,7 @@ uint64_t
 Hacl_FFDHE_ffdhe_shared_secret(Spec_FFDHE_ffdhe_alg a, uint8_t *sk, uint8_t *pk, uint8_t *ss)
 {
   uint32_t len = ffdhe_len(a);
-  uint32_t nLen = (len - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t nLen = (len - 1U) / 8U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen + nLen);
   uint64_t *p_n = (uint64_t *)alloca((nLen + nLen) * sizeof (uint64_t));
   memset(p_n, 0U, (nLen + nLen) * sizeof (uint64_t));
diff --git a/src/msvc/Hacl_Frodo1344.c b/src/msvc/Hacl_Frodo1344.c
index 2951f848..61262a4c 100644
--- a/src/msvc/Hacl_Frodo1344.c
+++ b/src/msvc/Hacl_Frodo1344.c
@@ -29,151 +29,113 @@
 #include "internal/Hacl_Frodo_KEM.h"
 #include "lib_memzero0.h"
 
-uint32_t Hacl_Frodo1344_crypto_bytes = (uint32_t)32U;
+uint32_t Hacl_Frodo1344_crypto_bytes = 32U;
 
-uint32_t Hacl_Frodo1344_crypto_publickeybytes = (uint32_t)21520U;
+uint32_t Hacl_Frodo1344_crypto_publickeybytes = 21520U;
 
-uint32_t Hacl_Frodo1344_crypto_secretkeybytes = (uint32_t)43088U;
+uint32_t Hacl_Frodo1344_crypto_secretkeybytes = 43088U;
 
-uint32_t Hacl_Frodo1344_crypto_ciphertextbytes = (uint32_t)21632U;
+uint32_t Hacl_Frodo1344_crypto_ciphertextbytes = 21632U;
 
 uint32_t Hacl_Frodo1344_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[80U] = { 0U };
-  randombytes_((uint32_t)80U, coins);
+  randombytes_(80U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)32U;
-  uint8_t *z = coins + (uint32_t)64U;
+  uint8_t *seed_se = coins + 32U;
+  uint8_t *z = coins + 64U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake256_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)21552U;
+  Hacl_Hash_SHA3_shake256_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 21552U;
   uint16_t s_matrix[10752U] = { 0U };
   uint16_t e_matrix[10752U] = { 0U };
   uint8_t r[43008U] = { 0U };
   uint8_t shake_input_seed_se[33U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43008U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)1344U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)1344U,
-    (uint32_t)8U,
-    r + (uint32_t)21504U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(33U, shake_input_seed_se, 43008U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(1344U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(1344U, 8U, r + 21504U, e_matrix);
   uint16_t b_matrix[10752U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)1806336U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 1806336U);
   uint16_t a_matrix[1806336U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)1344U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)1344U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)1344U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)1344U,
-    (uint32_t)8U,
-    (uint32_t)16U,
-    b_matrix,
-    b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)1344U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)10752U, uint16_t);
-  uint32_t slen1 = (uint32_t)43056U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 1344U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(1344U, 1344U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(1344U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(1344U, 8U, 16U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(1344U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 10752U, uint16_t);
+  uint32_t slen1 = 43056U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)32U, pk, (uint32_t)21520U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)21520U, pk, (uint32_t)32U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)80U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 32U * sizeof (uint8_t));
+  memcpy(sk_p + 32U, pk, 21520U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(21520U, pk, 32U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 80U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo1344_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[32U] = { 0U };
-  randombytes_((uint32_t)32U, coins);
+  randombytes_(32U, coins);
   uint8_t seed_se_k[64U] = { 0U };
   uint8_t pkh_mu[64U] = { 0U };
-  Hacl_SHA3_shake256_hacl((uint32_t)21520U, pk, (uint32_t)32U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)32U, coins, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)64U, pkh_mu, (uint32_t)64U, seed_se_k);
+  Hacl_Hash_SHA3_shake256_hacl(21520U, pk, 32U, pkh_mu);
+  memcpy(pkh_mu + 32U, coins, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(64U, pkh_mu, 64U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)32U;
+  uint8_t *k = seed_se_k + 32U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[10752U] = { 0U };
   uint16_t ep_matrix[10752U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[43136U] = { 0U };
   uint8_t shake_input_seed_se[33U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43136U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, (uint32_t)1344U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)1344U,
-    r + (uint32_t)21504U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)43008U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(33U, shake_input_seed_se, 43136U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r + 21504U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 8U, r + 43008U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)21504U;
+  uint8_t *c2 = ct + 21504U;
   uint16_t bp_matrix[10752U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)1806336U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 1806336U);
   uint16_t a_matrix[1806336U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)1344U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)1344U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)1344U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)1344U, (uint32_t)16U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 1344U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 1344U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 1344U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 1344U, 16U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[10752U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)1344U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(1344U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)4U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)21664U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 4U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 16U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 21664U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *shake_input_ss = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)21632U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)21632U, k, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, (uint32_t)32U, ss);
+  memcpy(shake_input_ss, ct, 21632U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 21632U, k, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(ss_init_len, shake_input_ss, 32U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)64U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)32U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t);
+  Lib_Memzero0_memzero(coins, 32U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -181,39 +143,30 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[10752U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)21504U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)1344U, (uint32_t)16U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, c2, c_matrix);
+  uint8_t *c2 = ct + 21504U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 1344U, 16U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 16U, c2, c_matrix);
   uint8_t mu_decode[32U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)21552U;
+  uint8_t *s_bytes = sk + 21552U;
   uint16_t s_matrix[10752U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)1344U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)16U,
-    (uint32_t)4U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(1344U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 1344U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(16U, 4U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[64U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)64U;
+  uint32_t pkh_mu_decode_len = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t *pkh_mu_decode = (uint8_t *)alloca(pkh_mu_decode_len * sizeof (uint8_t));
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)43056U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)32U, mu_decode, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)64U, seed_se_k);
+  uint8_t *pkh = sk + 43056U;
+  memcpy(pkh_mu_decode, pkh, 32U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 32U, mu_decode, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, 64U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)32U;
+  uint8_t *kp = seed_se_k + 32U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[10752U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -222,80 +175,58 @@ uint32_t Hacl_Frodo1344_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[43136U] = { 0U };
   uint8_t shake_input_seed_se[33U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)33U, shake_input_seed_se, (uint32_t)43136U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)33U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U, (uint32_t)1344U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)1344U,
-    r + (uint32_t)21504U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)43008U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)32U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(33U, shake_input_seed_se, 43136U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 33U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 1344U, r + 21504U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix1344(8U, 8U, r + 43008U, epp_matrix);
+  uint8_t *pk = sk + 32U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)1806336U);
+  uint8_t *b = pk + 16U;
+  KRML_CHECK_SIZE(sizeof (uint16_t), 1806336U);
   uint16_t a_matrix[1806336U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)1344U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)1344U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)1344U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 1344U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 1344U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 1344U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[10752U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)1344U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)1344U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(1344U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 1344U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)4U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)1344U, (uint32_t)16U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)10752U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)1344U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 4U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 1344U, 16U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 16U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 10752U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 1344U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[32U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;
   }
-  uint32_t ss_init_len = (uint32_t)21664U;
+  uint32_t ss_init_len = 21664U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *ss_init = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)21632U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)21632U, kp_s, (uint32_t)32U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, (uint32_t)32U, ss);
+  memcpy(ss_init, ct, 21632U * sizeof (uint8_t));
+  memcpy(ss_init + 21632U, kp_s, 32U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(ss_init_len, ss_init, 32U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)64U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)32U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 32U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 64U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 32U, uint8_t);
+  return 0U;
 }
 
diff --git a/src/msvc/Hacl_Frodo64.c b/src/msvc/Hacl_Frodo64.c
index 45ee9dd6..392d87f9 100644
--- a/src/msvc/Hacl_Frodo64.c
+++ b/src/msvc/Hacl_Frodo64.c
@@ -34,145 +34,111 @@
  */
 
 
-uint32_t Hacl_Frodo64_crypto_bytes = (uint32_t)16U;
+uint32_t Hacl_Frodo64_crypto_bytes = 16U;
 
-uint32_t Hacl_Frodo64_crypto_publickeybytes = (uint32_t)976U;
+uint32_t Hacl_Frodo64_crypto_publickeybytes = 976U;
 
-uint32_t Hacl_Frodo64_crypto_secretkeybytes = (uint32_t)2032U;
+uint32_t Hacl_Frodo64_crypto_secretkeybytes = 2032U;
 
-uint32_t Hacl_Frodo64_crypto_ciphertextbytes = (uint32_t)1080U;
+uint32_t Hacl_Frodo64_crypto_ciphertextbytes = 1080U;
 
 uint32_t Hacl_Frodo64_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[48U] = { 0U };
-  randombytes_((uint32_t)48U, coins);
+  randombytes_(48U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)16U;
-  uint8_t *z = coins + (uint32_t)32U;
+  uint8_t *seed_se = coins + 16U;
+  uint8_t *z = coins + 32U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake128_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)992U;
+  Hacl_Hash_SHA3_shake128_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 992U;
   uint16_t s_matrix[512U] = { 0U };
   uint16_t e_matrix[512U] = { 0U };
   uint8_t r[2048U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2048U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)64U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)64U,
-    (uint32_t)8U,
-    r + (uint32_t)1024U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 2048U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(64U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(64U, 8U, r + 1024U, e_matrix);
   uint16_t b_matrix[512U] = { 0U };
   uint16_t a_matrix[4096U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)64U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)64U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)64U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b_matrix, b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)64U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)512U, uint16_t);
-  uint32_t slen1 = (uint32_t)2016U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 64U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(64U, 64U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(64U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(64U, 8U, 15U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(64U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 512U, uint16_t);
+  uint32_t slen1 = 2016U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)16U, pk, (uint32_t)976U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)976U, pk, (uint32_t)16U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)48U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 16U * sizeof (uint8_t));
+  memcpy(sk_p + 16U, pk, 976U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(976U, pk, 16U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 48U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo64_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[16U] = { 0U };
-  randombytes_((uint32_t)16U, coins);
+  randombytes_(16U, coins);
   uint8_t seed_se_k[32U] = { 0U };
   uint8_t pkh_mu[32U] = { 0U };
-  Hacl_SHA3_shake128_hacl((uint32_t)976U, pk, (uint32_t)16U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)16U, coins, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)32U, pkh_mu, (uint32_t)32U, seed_se_k);
+  Hacl_Hash_SHA3_shake128_hacl(976U, pk, 16U, pkh_mu);
+  memcpy(pkh_mu + 16U, coins, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(32U, pkh_mu, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)16U;
+  uint8_t *k = seed_se_k + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[512U] = { 0U };
   uint16_t ep_matrix[512U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[2176U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2176U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, (uint32_t)64U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)64U,
-    r + (uint32_t)1024U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)2048U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 2176U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r + 1024U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 8U, r + 2048U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)960U;
+  uint8_t *c2 = ct + 960U;
   uint16_t bp_matrix[512U] = { 0U };
   uint16_t a_matrix[4096U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)64U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)64U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)64U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 64U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 64U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 64U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 64U, 15U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[512U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(64U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)1096U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 15U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 1096U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *shake_input_ss = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)1080U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)1080U, k, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, (uint32_t)16U, ss);
+  memcpy(shake_input_ss, ct, 1080U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 1080U, k, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(ss_init_len, shake_input_ss, 16U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(coins, 16U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -180,39 +146,30 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[512U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)960U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, c2, c_matrix);
+  uint8_t *c2 = ct + 960U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 64U, 15U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 15U, c2, c_matrix);
   uint8_t mu_decode[16U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)992U;
+  uint8_t *s_bytes = sk + 992U;
   uint16_t s_matrix[512U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)64U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(64U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 64U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(15U, 2U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[32U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)32U;
+  uint32_t pkh_mu_decode_len = 32U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t *pkh_mu_decode = (uint8_t *)alloca(pkh_mu_decode_len * sizeof (uint8_t));
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)2016U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)16U, mu_decode, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)32U, seed_se_k);
+  uint8_t *pkh = sk + 2016U;
+  memcpy(pkh_mu_decode, pkh, 16U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 16U, mu_decode, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)16U;
+  uint8_t *kp = seed_se_k + 16U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[512U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -221,80 +178,58 @@ uint32_t Hacl_Frodo64_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[2176U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)2176U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U, (uint32_t)64U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)64U,
-    r + (uint32_t)1024U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)2048U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)16U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 2176U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 64U, r + 1024U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix64(8U, 8U, r + 2048U, epp_matrix);
+  uint8_t *pk = sk + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t a_matrix[4096U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)64U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)64U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)64U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 64U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 64U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 64U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[512U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)64U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)64U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(64U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 64U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)64U, (uint32_t)15U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)512U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)64U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 64U, 15U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 15U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 512U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 64U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;);
-  uint32_t ss_init_len = (uint32_t)1096U;
+  uint32_t ss_init_len = 1096U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *ss_init = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)1080U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)1080U, kp_s, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, (uint32_t)16U, ss);
+  memcpy(ss_init, ct, 1080U * sizeof (uint8_t));
+  memcpy(ss_init + 1080U, kp_s, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(ss_init_len, ss_init, 16U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)16U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 16U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 16U, uint8_t);
+  return 0U;
 }
 
diff --git a/src/msvc/Hacl_Frodo640.c b/src/msvc/Hacl_Frodo640.c
index badd2bae..5de5871f 100644
--- a/src/msvc/Hacl_Frodo640.c
+++ b/src/msvc/Hacl_Frodo640.c
@@ -29,151 +29,113 @@
 #include "internal/Hacl_Frodo_KEM.h"
 #include "lib_memzero0.h"
 
-uint32_t Hacl_Frodo640_crypto_bytes = (uint32_t)16U;
+uint32_t Hacl_Frodo640_crypto_bytes = 16U;
 
-uint32_t Hacl_Frodo640_crypto_publickeybytes = (uint32_t)9616U;
+uint32_t Hacl_Frodo640_crypto_publickeybytes = 9616U;
 
-uint32_t Hacl_Frodo640_crypto_secretkeybytes = (uint32_t)19888U;
+uint32_t Hacl_Frodo640_crypto_secretkeybytes = 19888U;
 
-uint32_t Hacl_Frodo640_crypto_ciphertextbytes = (uint32_t)9720U;
+uint32_t Hacl_Frodo640_crypto_ciphertextbytes = 9720U;
 
 uint32_t Hacl_Frodo640_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[48U] = { 0U };
-  randombytes_((uint32_t)48U, coins);
+  randombytes_(48U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)16U;
-  uint8_t *z = coins + (uint32_t)32U;
+  uint8_t *seed_se = coins + 16U;
+  uint8_t *z = coins + 32U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake128_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)9632U;
+  Hacl_Hash_SHA3_shake128_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 9632U;
   uint16_t s_matrix[5120U] = { 0U };
   uint16_t e_matrix[5120U] = { 0U };
   uint8_t r[20480U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20480U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)640U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)640U,
-    (uint32_t)8U,
-    r + (uint32_t)10240U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 20480U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(640U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(640U, 8U, r + 10240U, e_matrix);
   uint16_t b_matrix[5120U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)409600U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 409600U);
   uint16_t a_matrix[409600U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)640U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)640U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)640U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)640U,
-    (uint32_t)8U,
-    (uint32_t)15U,
-    b_matrix,
-    b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)640U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)5120U, uint16_t);
-  uint32_t slen1 = (uint32_t)19872U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 640U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(640U, 640U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(640U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(640U, 8U, 15U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(640U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 5120U, uint16_t);
+  uint32_t slen1 = 19872U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)16U, pk, (uint32_t)9616U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)9616U, pk, (uint32_t)16U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)48U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 16U * sizeof (uint8_t));
+  memcpy(sk_p + 16U, pk, 9616U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(9616U, pk, 16U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 48U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo640_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[16U] = { 0U };
-  randombytes_((uint32_t)16U, coins);
+  randombytes_(16U, coins);
   uint8_t seed_se_k[32U] = { 0U };
   uint8_t pkh_mu[32U] = { 0U };
-  Hacl_SHA3_shake128_hacl((uint32_t)9616U, pk, (uint32_t)16U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)16U, coins, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)32U, pkh_mu, (uint32_t)32U, seed_se_k);
+  Hacl_Hash_SHA3_shake128_hacl(9616U, pk, 16U, pkh_mu);
+  memcpy(pkh_mu + 16U, coins, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(32U, pkh_mu, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)16U;
+  uint8_t *k = seed_se_k + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[5120U] = { 0U };
   uint16_t ep_matrix[5120U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[20608U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20608U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, (uint32_t)640U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)640U,
-    r + (uint32_t)10240U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)20480U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 20608U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r + 10240U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 8U, r + 20480U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)9600U;
+  uint8_t *c2 = ct + 9600U;
   uint16_t bp_matrix[5120U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)409600U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 409600U);
   uint16_t a_matrix[409600U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)640U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)640U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)640U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 640U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 640U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 640U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 640U, 15U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[5120U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)640U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(640U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)9736U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 15U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 9736U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *shake_input_ss = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)9720U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)9720U, k, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, shake_input_ss, (uint32_t)16U, ss);
+  memcpy(shake_input_ss, ct, 9720U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 9720U, k, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(ss_init_len, shake_input_ss, 16U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(coins, 16U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -181,39 +143,30 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[5120U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)9600U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, c2, c_matrix);
+  uint8_t *c2 = ct + 9600U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 640U, 15U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 15U, c2, c_matrix);
   uint8_t mu_decode[16U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)9632U;
+  uint8_t *s_bytes = sk + 9632U;
   uint16_t s_matrix[5120U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)640U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(640U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 640U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(15U, 2U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[32U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)32U;
+  uint32_t pkh_mu_decode_len = 32U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t *pkh_mu_decode = (uint8_t *)alloca(pkh_mu_decode_len * sizeof (uint8_t));
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)19872U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)16U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)16U, mu_decode, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)32U, seed_se_k);
+  uint8_t *pkh = sk + 19872U;
+  memcpy(pkh_mu_decode, pkh, 16U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 16U, mu_decode, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(pkh_mu_decode_len, pkh_mu_decode, 32U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)16U;
+  uint8_t *kp = seed_se_k + 16U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[5120U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -222,81 +175,59 @@ uint32_t Hacl_Frodo640_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[20608U] = { 0U };
   uint8_t shake_input_seed_se[17U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl((uint32_t)17U, shake_input_seed_se, (uint32_t)20608U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)17U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U, (uint32_t)640U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)640U,
-    r + (uint32_t)10240U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)20480U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)16U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(17U, shake_input_seed_se, 20608U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 17U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 640U, r + 10240U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix640(8U, 8U, r + 20480U, epp_matrix);
+  uint8_t *pk = sk + 16U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)409600U);
+  uint8_t *b = pk + 16U;
+  KRML_CHECK_SIZE(sizeof (uint16_t), 409600U);
   uint16_t a_matrix[409600U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)640U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)640U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)640U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 640U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 640U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 640U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[5120U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)640U, (uint32_t)8U, (uint32_t)15U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)640U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(640U, 8U, 15U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 640U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)15U,
-    (uint32_t)2U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)640U, (uint32_t)15U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)15U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)5120U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)640U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(15U, 2U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 640U, 15U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 15U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 5120U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 640U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[16U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;);
-  uint32_t ss_init_len = (uint32_t)9736U;
+  uint32_t ss_init_len = 9736U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *ss_init = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)9720U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)9720U, kp_s, (uint32_t)16U * sizeof (uint8_t));
-  Hacl_SHA3_shake128_hacl(ss_init_len, ss_init, (uint32_t)16U, ss);
+  memcpy(ss_init, ct, 9720U * sizeof (uint8_t));
+  memcpy(ss_init + 9720U, kp_s, 16U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake128_hacl(ss_init_len, ss_init, 16U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)16U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)32U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)16U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 16U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 32U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 16U, uint8_t);
+  return 0U;
 }
 
diff --git a/src/msvc/Hacl_Frodo976.c b/src/msvc/Hacl_Frodo976.c
index dbd9bc32..61454ceb 100644
--- a/src/msvc/Hacl_Frodo976.c
+++ b/src/msvc/Hacl_Frodo976.c
@@ -29,151 +29,113 @@
 #include "internal/Hacl_Frodo_KEM.h"
 #include "lib_memzero0.h"
 
-uint32_t Hacl_Frodo976_crypto_bytes = (uint32_t)24U;
+uint32_t Hacl_Frodo976_crypto_bytes = 24U;
 
-uint32_t Hacl_Frodo976_crypto_publickeybytes = (uint32_t)15632U;
+uint32_t Hacl_Frodo976_crypto_publickeybytes = 15632U;
 
-uint32_t Hacl_Frodo976_crypto_secretkeybytes = (uint32_t)31296U;
+uint32_t Hacl_Frodo976_crypto_secretkeybytes = 31296U;
 
-uint32_t Hacl_Frodo976_crypto_ciphertextbytes = (uint32_t)15744U;
+uint32_t Hacl_Frodo976_crypto_ciphertextbytes = 15744U;
 
 uint32_t Hacl_Frodo976_crypto_kem_keypair(uint8_t *pk, uint8_t *sk)
 {
   uint8_t coins[64U] = { 0U };
-  randombytes_((uint32_t)64U, coins);
+  randombytes_(64U, coins);
   uint8_t *s = coins;
-  uint8_t *seed_se = coins + (uint32_t)24U;
-  uint8_t *z = coins + (uint32_t)48U;
+  uint8_t *seed_se = coins + 24U;
+  uint8_t *z = coins + 48U;
   uint8_t *seed_a = pk;
-  Hacl_SHA3_shake256_hacl((uint32_t)16U, z, (uint32_t)16U, seed_a);
-  uint8_t *b_bytes = pk + (uint32_t)16U;
-  uint8_t *s_bytes = sk + (uint32_t)15656U;
+  Hacl_Hash_SHA3_shake256_hacl(16U, z, 16U, seed_a);
+  uint8_t *b_bytes = pk + 16U;
+  uint8_t *s_bytes = sk + 15656U;
   uint16_t s_matrix[7808U] = { 0U };
   uint16_t e_matrix[7808U] = { 0U };
   uint8_t r[31232U] = { 0U };
   uint8_t shake_input_seed_se[25U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x5fU;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31232U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)976U, (uint32_t)8U, r, s_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)976U,
-    (uint32_t)8U,
-    r + (uint32_t)15616U,
-    e_matrix);
+  shake_input_seed_se[0U] = 0x5fU;
+  memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(25U, shake_input_seed_se, 31232U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(976U, 8U, r, s_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(976U, 8U, r + 15616U, e_matrix);
   uint16_t b_matrix[7808U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)952576U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 952576U);
   uint16_t a_matrix[952576U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)976U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)976U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    a_matrix,
-    s_matrix,
-    b_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)976U, (uint32_t)8U, b_matrix, e_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)976U,
-    (uint32_t)8U,
-    (uint32_t)16U,
-    b_matrix,
-    b_bytes);
-  Hacl_Impl_Matrix_matrix_to_lbytes((uint32_t)976U, (uint32_t)8U, s_matrix, s_bytes);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(e_matrix, (uint32_t)7808U, uint16_t);
-  uint32_t slen1 = (uint32_t)31272U;
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 976U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(976U, 976U, 8U, a_matrix, s_matrix, b_matrix);
+  Hacl_Impl_Matrix_matrix_add(976U, 8U, b_matrix, e_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(976U, 8U, 16U, b_matrix, b_bytes);
+  Hacl_Impl_Matrix_matrix_to_lbytes(976U, 8U, s_matrix, s_bytes);
+  Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(e_matrix, 7808U, uint16_t);
+  uint32_t slen1 = 31272U;
   uint8_t *sk_p = sk;
-  memcpy(sk_p, s, (uint32_t)24U * sizeof (uint8_t));
-  memcpy(sk_p + (uint32_t)24U, pk, (uint32_t)15632U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)15632U, pk, (uint32_t)24U, sk + slen1);
-  Lib_Memzero0_memzero(coins, (uint32_t)64U, uint8_t);
-  return (uint32_t)0U;
+  memcpy(sk_p, s, 24U * sizeof (uint8_t));
+  memcpy(sk_p + 24U, pk, 15632U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(15632U, pk, 24U, sk + slen1);
+  Lib_Memzero0_memzero(coins, 64U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo976_crypto_kem_enc(uint8_t *ct, uint8_t *ss, uint8_t *pk)
 {
   uint8_t coins[24U] = { 0U };
-  randombytes_((uint32_t)24U, coins);
+  randombytes_(24U, coins);
   uint8_t seed_se_k[48U] = { 0U };
   uint8_t pkh_mu[48U] = { 0U };
-  Hacl_SHA3_shake256_hacl((uint32_t)15632U, pk, (uint32_t)24U, pkh_mu);
-  memcpy(pkh_mu + (uint32_t)24U, coins, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)48U, pkh_mu, (uint32_t)48U, seed_se_k);
+  Hacl_Hash_SHA3_shake256_hacl(15632U, pk, 24U, pkh_mu);
+  memcpy(pkh_mu + 24U, coins, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(48U, pkh_mu, 48U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *k = seed_se_k + (uint32_t)24U;
+  uint8_t *k = seed_se_k + 24U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
+  uint8_t *b = pk + 16U;
   uint16_t sp_matrix[7808U] = { 0U };
   uint16_t ep_matrix[7808U] = { 0U };
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[31360U] = { 0U };
   uint8_t shake_input_seed_se[25U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31360U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, (uint32_t)976U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)976U,
-    r + (uint32_t)15616U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)31232U,
-    epp_matrix);
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(25U, shake_input_seed_se, 31360U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r + 15616U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 8U, r + 31232U, epp_matrix);
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)15616U;
+  uint8_t *c2 = ct + 15616U;
   uint16_t bp_matrix[7808U] = { 0U };
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)952576U);
+  KRML_CHECK_SIZE(sizeof (uint16_t), 952576U);
   uint16_t a_matrix[952576U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)976U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)976U,
-    sp_matrix,
-    a_matrix,
-    bp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)976U, bp_matrix, ep_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, bp_matrix, c1);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 976U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 976U, sp_matrix, a_matrix, bp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 976U, bp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 976U, 16U, bp_matrix, c1);
   uint16_t v_matrix[64U] = { 0U };
   uint16_t b_matrix[7808U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)976U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    v_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(976U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 8U, sp_matrix, b_matrix, v_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)3U,
-    (uint32_t)8U,
-    coins,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, v_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Frodo_Pack_frodo_pack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, v_matrix, c2);
-  Lib_Memzero0_memzero(v_matrix, (uint32_t)64U, uint16_t);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint32_t ss_init_len = (uint32_t)15768U;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 3U, 8U, coins, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, v_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Frodo_Pack_frodo_pack(8U, 8U, 16U, v_matrix, c2);
+  Lib_Memzero0_memzero(v_matrix, 64U, uint16_t);
+  Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint32_t ss_init_len = 15768U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *shake_input_ss = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(shake_input_ss, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(shake_input_ss, ct, (uint32_t)15744U * sizeof (uint8_t));
-  memcpy(shake_input_ss + (uint32_t)15744U, k, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, shake_input_ss, (uint32_t)24U, ss);
+  memcpy(shake_input_ss, ct, 15744U * sizeof (uint8_t));
+  memcpy(shake_input_ss + 15744U, k, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(ss_init_len, shake_input_ss, 24U, ss);
   Lib_Memzero0_memzero(shake_input_ss, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)48U, uint8_t);
-  Lib_Memzero0_memzero(coins, (uint32_t)24U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t);
+  Lib_Memzero0_memzero(coins, 24U, uint8_t);
+  return 0U;
 }
 
 uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
@@ -181,39 +143,30 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t bp_matrix[7808U] = { 0U };
   uint16_t c_matrix[64U] = { 0U };
   uint8_t *c1 = ct;
-  uint8_t *c2 = ct + (uint32_t)15616U;
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, c1, bp_matrix);
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, c2, c_matrix);
+  uint8_t *c2 = ct + 15616U;
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 976U, 16U, c1, bp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(8U, 8U, 16U, c2, c_matrix);
   uint8_t mu_decode[24U] = { 0U };
-  uint8_t *s_bytes = sk + (uint32_t)15656U;
+  uint8_t *s_bytes = sk + 15656U;
   uint16_t s_matrix[7808U] = { 0U };
   uint16_t m_matrix[64U] = { 0U };
-  Hacl_Impl_Matrix_matrix_from_lbytes((uint32_t)976U, (uint32_t)8U, s_bytes, s_matrix);
-  Hacl_Impl_Matrix_matrix_mul_s((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    bp_matrix,
-    s_matrix,
-    m_matrix);
-  Hacl_Impl_Matrix_matrix_sub((uint32_t)8U, (uint32_t)8U, c_matrix, m_matrix);
-  Hacl_Impl_Frodo_Encode_frodo_key_decode((uint32_t)16U,
-    (uint32_t)3U,
-    (uint32_t)8U,
-    m_matrix,
-    mu_decode);
-  Lib_Memzero0_memzero(s_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(m_matrix, (uint32_t)64U, uint16_t);
+  Hacl_Impl_Matrix_matrix_from_lbytes(976U, 8U, s_bytes, s_matrix);
+  Hacl_Impl_Matrix_matrix_mul_s(8U, 976U, 8U, bp_matrix, s_matrix, m_matrix);
+  Hacl_Impl_Matrix_matrix_sub(8U, 8U, c_matrix, m_matrix);
+  Hacl_Impl_Frodo_Encode_frodo_key_decode(16U, 3U, 8U, m_matrix, mu_decode);
+  Lib_Memzero0_memzero(s_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(m_matrix, 64U, uint16_t);
   uint8_t seed_se_k[48U] = { 0U };
-  uint32_t pkh_mu_decode_len = (uint32_t)48U;
+  uint32_t pkh_mu_decode_len = 48U;
   KRML_CHECK_SIZE(sizeof (uint8_t), pkh_mu_decode_len);
   uint8_t *pkh_mu_decode = (uint8_t *)alloca(pkh_mu_decode_len * sizeof (uint8_t));
   memset(pkh_mu_decode, 0U, pkh_mu_decode_len * sizeof (uint8_t));
-  uint8_t *pkh = sk + (uint32_t)31272U;
-  memcpy(pkh_mu_decode, pkh, (uint32_t)24U * sizeof (uint8_t));
-  memcpy(pkh_mu_decode + (uint32_t)24U, mu_decode, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, (uint32_t)48U, seed_se_k);
+  uint8_t *pkh = sk + 31272U;
+  memcpy(pkh_mu_decode, pkh, 24U * sizeof (uint8_t));
+  memcpy(pkh_mu_decode + 24U, mu_decode, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(pkh_mu_decode_len, pkh_mu_decode, 48U, seed_se_k);
   uint8_t *seed_se = seed_se_k;
-  uint8_t *kp = seed_se_k + (uint32_t)24U;
+  uint8_t *kp = seed_se_k + 24U;
   uint8_t *s = sk;
   uint16_t bpp_matrix[7808U] = { 0U };
   uint16_t cp_matrix[64U] = { 0U };
@@ -222,80 +175,58 @@ uint32_t Hacl_Frodo976_crypto_kem_dec(uint8_t *ss, uint8_t *ct, uint8_t *sk)
   uint16_t epp_matrix[64U] = { 0U };
   uint8_t r[31360U] = { 0U };
   uint8_t shake_input_seed_se[25U] = { 0U };
-  shake_input_seed_se[0U] = (uint8_t)0x96U;
-  memcpy(shake_input_seed_se + (uint32_t)1U, seed_se, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl((uint32_t)25U, shake_input_seed_se, (uint32_t)31360U, r);
-  Lib_Memzero0_memzero(shake_input_seed_se, (uint32_t)25U, uint8_t);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U, (uint32_t)976U, r, sp_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)976U,
-    r + (uint32_t)15616U,
-    ep_matrix);
-  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976((uint32_t)8U,
-    (uint32_t)8U,
-    r + (uint32_t)31232U,
-    epp_matrix);
-  uint8_t *pk = sk + (uint32_t)24U;
+  shake_input_seed_se[0U] = 0x96U;
+  memcpy(shake_input_seed_se + 1U, seed_se, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(25U, shake_input_seed_se, 31360U, r);
+  Lib_Memzero0_memzero(shake_input_seed_se, 25U, uint8_t);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r, sp_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 976U, r + 15616U, ep_matrix);
+  Hacl_Impl_Frodo_Sample_frodo_sample_matrix976(8U, 8U, r + 31232U, epp_matrix);
+  uint8_t *pk = sk + 24U;
   uint8_t *seed_a = pk;
-  uint8_t *b = pk + (uint32_t)16U;
-  KRML_CHECK_SIZE(sizeof (uint16_t), (uint32_t)952576U);
+  uint8_t *b = pk + 16U;
+  KRML_CHECK_SIZE(sizeof (uint16_t), 952576U);
   uint16_t a_matrix[952576U] = { 0U };
-  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128,
-    (uint32_t)976U,
-    seed_a,
-    a_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)976U,
-    sp_matrix,
-    a_matrix,
-    bpp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)976U, bpp_matrix, ep_matrix);
+  Hacl_Impl_Frodo_Params_frodo_gen_matrix(Spec_Frodo_Params_SHAKE128, 976U, seed_a, a_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 976U, sp_matrix, a_matrix, bpp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 976U, bpp_matrix, ep_matrix);
   uint16_t b_matrix[7808U] = { 0U };
-  Hacl_Impl_Frodo_Pack_frodo_unpack((uint32_t)976U, (uint32_t)8U, (uint32_t)16U, b, b_matrix);
-  Hacl_Impl_Matrix_matrix_mul((uint32_t)8U,
-    (uint32_t)976U,
-    (uint32_t)8U,
-    sp_matrix,
-    b_matrix,
-    cp_matrix);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, epp_matrix);
+  Hacl_Impl_Frodo_Pack_frodo_unpack(976U, 8U, 16U, b, b_matrix);
+  Hacl_Impl_Matrix_matrix_mul(8U, 976U, 8U, sp_matrix, b_matrix, cp_matrix);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, epp_matrix);
   uint16_t mu_encode[64U] = { 0U };
-  Hacl_Impl_Frodo_Encode_frodo_key_encode((uint32_t)16U,
-    (uint32_t)3U,
-    (uint32_t)8U,
-    mu_decode,
-    mu_encode);
-  Hacl_Impl_Matrix_matrix_add((uint32_t)8U, (uint32_t)8U, cp_matrix, mu_encode);
-  Lib_Memzero0_memzero(mu_encode, (uint32_t)64U, uint16_t);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)976U, (uint32_t)16U, bpp_matrix);
-  Hacl_Impl_Matrix_mod_pow2((uint32_t)8U, (uint32_t)8U, (uint32_t)16U, cp_matrix);
-  Lib_Memzero0_memzero(sp_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(ep_matrix, (uint32_t)7808U, uint16_t);
-  Lib_Memzero0_memzero(epp_matrix, (uint32_t)64U, uint16_t);
-  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)976U, bp_matrix, bpp_matrix);
-  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq((uint32_t)8U, (uint32_t)8U, c_matrix, cp_matrix);
-  uint16_t mask = b1 & b2;
+  Hacl_Impl_Frodo_Encode_frodo_key_encode(16U, 3U, 8U, mu_decode, mu_encode);
+  Hacl_Impl_Matrix_matrix_add(8U, 8U, cp_matrix, mu_encode);
+  Lib_Memzero0_memzero(mu_encode, 64U, uint16_t);
+  Hacl_Impl_Matrix_mod_pow2(8U, 976U, 16U, bpp_matrix);
+  Hacl_Impl_Matrix_mod_pow2(8U, 8U, 16U, cp_matrix);
+  Lib_Memzero0_memzero(sp_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(ep_matrix, 7808U, uint16_t);
+  Lib_Memzero0_memzero(epp_matrix, 64U, uint16_t);
+  uint16_t b1 = Hacl_Impl_Matrix_matrix_eq(8U, 976U, bp_matrix, bpp_matrix);
+  uint16_t b2 = Hacl_Impl_Matrix_matrix_eq(8U, 8U, c_matrix, cp_matrix);
+  uint16_t mask = (uint32_t)b1 & (uint32_t)b2;
   uint16_t mask0 = mask;
   uint8_t kp_s[24U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)24U; i++)
+  for (uint32_t i = 0U; i < 24U; i++)
   {
     uint8_t *os = kp_s;
     uint8_t uu____0 = s[i];
-    uint8_t x = uu____0 ^ ((uint8_t)mask0 & (kp[i] ^ uu____0));
+    uint8_t
+    x = (uint32_t)uu____0 ^ ((uint32_t)(uint8_t)mask0 & ((uint32_t)kp[i] ^ (uint32_t)uu____0));
     os[i] = x;
   }
-  uint32_t ss_init_len = (uint32_t)15768U;
+  uint32_t ss_init_len = 15768U;
   KRML_CHECK_SIZE(sizeof (uint8_t), ss_init_len);
   uint8_t *ss_init = (uint8_t *)alloca(ss_init_len * sizeof (uint8_t));
   memset(ss_init, 0U, ss_init_len * sizeof (uint8_t));
-  memcpy(ss_init, ct, (uint32_t)15744U * sizeof (uint8_t));
-  memcpy(ss_init + (uint32_t)15744U, kp_s, (uint32_t)24U * sizeof (uint8_t));
-  Hacl_SHA3_shake256_hacl(ss_init_len, ss_init, (uint32_t)24U, ss);
+  memcpy(ss_init, ct, 15744U * sizeof (uint8_t));
+  memcpy(ss_init + 15744U, kp_s, 24U * sizeof (uint8_t));
+  Hacl_Hash_SHA3_shake256_hacl(ss_init_len, ss_init, 24U, ss);
   Lib_Memzero0_memzero(ss_init, ss_init_len, uint8_t);
-  Lib_Memzero0_memzero(kp_s, (uint32_t)24U, uint8_t);
-  Lib_Memzero0_memzero(seed_se_k, (uint32_t)48U, uint8_t);
-  Lib_Memzero0_memzero(mu_decode, (uint32_t)24U, uint8_t);
-  return (uint32_t)0U;
+  Lib_Memzero0_memzero(kp_s, 24U, uint8_t);
+  Lib_Memzero0_memzero(seed_se_k, 48U, uint8_t);
+  Lib_Memzero0_memzero(mu_decode, 24U, uint8_t);
+  return 0U;
 }
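The decapsulation above finishes with the Fujisaki-Okamoto check: the re-encrypted matrices are compared against the ciphertext with `Hacl_Impl_Matrix_matrix_eq`, and the resulting all-ones/all-zeros mask selects between the derived key `kp` and the secret fallback `s` without branching. A minimal standalone sketch of that masked selection (illustrative names, not part of the HACL* API):

#include <stdint.h>
#include <stddef.h>

/* out[i] = a[i] when mask is all-ones, b[i] when mask is zero.
   `mask` must be 0xFFFF or 0x0000, as produced by a constant-time comparison. */
static void ct_select_bytes(uint8_t *out, const uint8_t *a, const uint8_t *b,
                            size_t len, uint16_t mask)
{
  uint8_t m = (uint8_t)mask;                         /* 0xFF or 0x00 */
  for (size_t i = 0; i < len; i++)
    out[i] = (uint8_t)(b[i] ^ (m & (a[i] ^ b[i])));  /* same xor/and trick as the kp_s loop above */
}

In the kp_s loop above, `a` plays the role of `kp` (chosen when both matrix comparisons succeed) and `b` the role of `s`, so timing and memory traffic are identical whether decapsulation accepts or implicitly rejects.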
 
diff --git a/src/msvc/Hacl_Frodo_KEM.c b/src/msvc/Hacl_Frodo_KEM.c
index 4265ac0e..e0a65a47 100644
--- a/src/msvc/Hacl_Frodo_KEM.c
+++ b/src/msvc/Hacl_Frodo_KEM.c
@@ -30,6 +30,6 @@
 
 void randombytes_(uint32_t len, uint8_t *res)
 {
-  KRML_HOST_IGNORE(Lib_RandomBuffer_System_randombytes(res, len));
+  Lib_RandomBuffer_System_randombytes(res, len);
 }
 
diff --git a/src/msvc/Hacl_GenericField32.c b/src/msvc/Hacl_GenericField32.c
index 47ca15e8..750d56fc 100644
--- a/src/msvc/Hacl_GenericField32.c
+++ b/src/msvc/Hacl_GenericField32.c
@@ -56,7 +56,7 @@ Check whether this library will work for a modulus `n`.
 bool Hacl_GenericField32_field_modulus_check(uint32_t len, uint32_t *n)
 {
   uint32_t m = Hacl_Bignum_Montgomery_bn_check_modulus_u32(len, n);
-  return m == (uint32_t)0xFFFFFFFFU;
+  return m == 0xFFFFFFFFU;
 }
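Per the comment in this hunk, `field_modulus_check` is the entry point for validating a modulus before any Montgomery context is built; Montgomery arithmetic needs an odd modulus, so unsupported values are rejected here. A hedged usage sketch against the signature shown above, assuming the public `Hacl_GenericField32.h` header and HACL*'s little-endian limb order; the modulus itself is arbitrary:

#include <stdint.h>
#include <stdbool.h>
#include "Hacl_GenericField32.h"  /* assumed public header for this file */

bool modulus_is_usable(void)
{
  /* n = 2^32 + 15, an odd 64-bit value stored as two 32-bit limbs, least significant first. */
  uint32_t n[2U] = { 0x0000000FU, 0x00000001U };
  return Hacl_GenericField32_field_modulus_check(2U, n);  /* true only for supported moduli */
}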
 
 /**
@@ -82,7 +82,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32
   uint32_t *r21 = r2;
   uint32_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint32_t));
-  uint32_t nBits = (uint32_t)32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
+  uint32_t nBits = 32U * Hacl_Bignum_Lib_bn_get_top_index_u32(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u32(len, nBits, n, r21);
   uint32_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
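Besides `n` and the precomputed `r2`, the context assembled in this hunk stores `mu`, the limb inverse returned by `Hacl_Bignum_ModInvLimb_mod_inv_uint32(n[0])`, which every Montgomery reduction multiplies by. The inverse of an odd limb modulo 2^32 can be computed with a handful of Newton iterations; the sketch below shows that standard technique in isolation (it is not HACL*'s implementation, and the exact sign convention of `mod_inv_uint32` is not visible in this diff):

#include <stdint.h>

/* Inverse of an odd 32-bit integer modulo 2^32 via Newton's iteration.
   Starting from x = n (correct to 3 bits, since n*n == 1 mod 8 for odd n),
   each step doubles the number of correct low bits: 3 -> 6 -> 12 -> 24 -> 48 >= 32. */
static uint32_t inv_limb_u32(uint32_t n)
{
  uint32_t x = n;
  for (int i = 0; i < 4; i++)
    x *= 2U - n * x;
  return x;  /* for any odd n: (uint32_t)(n * inv_limb_u32(n)) == 1U */
}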
@@ -283,27 +283,27 @@ Hacl_GenericField32_exp_consttime(
   uint32_t *aMc = (uint32_t *)alloca(k1.len * sizeof (uint32_t));
   memset(aMc, 0U, k1.len * sizeof (uint32_t));
   memcpy(aMc, aM, k1.len * sizeof (uint32_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t *ctx = (uint32_t *)alloca((len1 + len1) * sizeof (uint32_t));
     memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint32_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t));
-    uint32_t sw = (uint32_t)0U;
+    uint32_t sw = 0U;
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)32U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)32U;
+      uint32_t i1 = (bBits - i0 - 1U) / 32U;
+      uint32_t j = (bBits - i0 - 1U) % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
+      uint32_t bit = tmp >> j & 1U;
       uint32_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len1; i++)
+      for (uint32_t i = 0U; i < len1; i++)
       {
-        uint32_t dummy = ((uint32_t)0U - sw1) & (resM[i] ^ aMc[i]);
+        uint32_t dummy = (0U - sw1) & (resM[i] ^ aMc[i]);
         resM[i] = resM[i] ^ dummy;
         aMc[i] = aMc[i] ^ dummy;
       }
@@ -314,9 +314,9 @@ Hacl_GenericField32_exp_consttime(
       sw = bit;
     }
     uint32_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len1; i++)
+    for (uint32_t i = 0U; i < len1; i++)
     {
-      uint32_t dummy = ((uint32_t)0U - sw0) & (resM[i] ^ aMc[i]);
+      uint32_t dummy = (0U - sw0) & (resM[i] ^ aMc[i]);
       resM[i] = resM[i] ^ dummy;
       aMc[i] = aMc[i] ^ dummy;
     }
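The `bBits < 200` branch above is a constant-time Montgomery ladder: instead of branching on the secret exponent bit, it builds an all-ones/all-zeros mask from the bit and XOR-swaps `resM` and `aMc` under that mask, both inside the loop and once more at the end. The idiom on its own, as a hedged sketch:

#include <stdint.h>

/* Swap a and b (len words each) iff swap == 1; swap must be 0 or 1.
   (0U - swap) is all-zeros or all-ones, so `dummy` is either 0 or a[i] ^ b[i]. */
static void ct_cswap_u32(uint32_t swap, uint32_t *a, uint32_t *b, uint32_t len)
{
  uint32_t mask = 0U - swap;
  for (uint32_t i = 0U; i < len; i++)
  {
    uint32_t dummy = mask & (a[i] ^ b[i]);
    a[i] ^= dummy;
    b[i] ^= dummy;
  }
}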
@@ -324,22 +324,22 @@ Hacl_GenericField32_exp_consttime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 32U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t *ctx = (uint32_t *)alloca((len1 + len1) * sizeof (uint32_t));
     memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint32_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len1);
-    uint32_t *table = (uint32_t *)alloca((uint32_t)16U * len1 * sizeof (uint32_t));
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint32_t));
+    KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len1);
+    uint32_t *table = (uint32_t *)alloca(16U * len1 * sizeof (uint32_t));
+    memset(table, 0U, 16U * len1 * sizeof (uint32_t));
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t *tmp = (uint32_t *)alloca(len1 * sizeof (uint32_t));
     memset(tmp, 0U, len1 * sizeof (uint32_t));
@@ -350,29 +350,29 @@ Hacl_GenericField32_exp_consttime(
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint32_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint32_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint32_t *t11 = table + (i + 1U) * len1;
       uint32_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint32_t));
-      uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint32_t));
+      uint32_t *t2 = table + (2U * i + 2U) * len1;
       uint32_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint32_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint32_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, (uint32_t)4U);
-      memcpy(resM, (uint32_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint32_t));
+      uint32_t i0 = bBits / 4U * 4U;
+      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i0, 4U);
+      memcpy(resM, (uint32_t *)(table + 0U * len1), len1 * sizeof (uint32_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + (uint32_t)1U);
-        const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint32_t c = FStar_UInt32_eq_mask(bits_c, i1 + 1U);
+        const uint32_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint32_t *os = resM;
           uint32_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -388,24 +388,24 @@ Hacl_GenericField32_exp_consttime(
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t *tmp0 = (uint32_t *)alloca(len1 * sizeof (uint32_t));
     memset(tmp0, 0U, len1 * sizeof (uint32_t));
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
     {
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint32_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, (uint32_t)4U);
-      memcpy(tmp0, (uint32_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint32_t));
+      uint32_t k2 = bBits - bBits % 4U - 4U * i0 - 4U;
+      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, 4U);
+      memcpy(tmp0, (uint32_t *)(table + 0U * len1), len1 * sizeof (uint32_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + (uint32_t)1U);
-        const uint32_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint32_t c = FStar_UInt32_eq_mask(bits_l, i1 + 1U);
+        const uint32_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint32_t *os = tmp0;
           uint32_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -450,7 +450,7 @@ Hacl_GenericField32_exp_vartime(
   uint32_t *aMc = (uint32_t *)alloca(k1.len * sizeof (uint32_t));
   memset(aMc, 0U, k1.len * sizeof (uint32_t));
   memcpy(aMc, aM, k1.len * sizeof (uint32_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t *ctx = (uint32_t *)alloca((len1 + len1) * sizeof (uint32_t));
@@ -460,13 +460,13 @@ Hacl_GenericField32_exp_vartime(
     uint32_t *ctx_n = ctx;
     uint32_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)32U;
-      uint32_t j = i % (uint32_t)32U;
+      uint32_t i1 = i / 32U;
+      uint32_t j = i % 32U;
       uint32_t tmp = b[i1];
-      uint32_t bit = tmp >> j & (uint32_t)1U;
-      if (!(bit == (uint32_t)0U))
+      uint32_t bit = tmp >> j & 1U;
+      if (!(bit == 0U))
       {
         uint32_t *ctx_n0 = ctx;
         Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n0, k1.mu, resM, aMc, resM);
@@ -478,22 +478,22 @@ Hacl_GenericField32_exp_vartime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)32U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 32U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint32_t), len1 + len1);
     uint32_t *ctx = (uint32_t *)alloca((len1 + len1) * sizeof (uint32_t));
     memset(ctx, 0U, (len1 + len1) * sizeof (uint32_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint32_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint32_t));
-    KRML_CHECK_SIZE(sizeof (uint32_t), (uint32_t)16U * len1);
-    uint32_t *table = (uint32_t *)alloca((uint32_t)16U * len1 * sizeof (uint32_t));
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint32_t));
+    KRML_CHECK_SIZE(sizeof (uint32_t), 16U * len1);
+    uint32_t *table = (uint32_t *)alloca(16U * len1 * sizeof (uint32_t));
+    memset(table, 0U, 16U * len1 * sizeof (uint32_t));
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t *tmp = (uint32_t *)alloca(len1 * sizeof (uint32_t));
     memset(tmp, 0U, len1 * sizeof (uint32_t));
@@ -504,21 +504,21 @@ Hacl_GenericField32_exp_vartime(
     Hacl_Bignum_Montgomery_bn_from_mont_u32(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint32_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint32_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint32_t *t11 = table + (i + 1U) * len1;
       uint32_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint32_t));
-      uint32_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint32_t));
+      uint32_t *t2 = table + (2U * i + 2U) * len1;
       uint32_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u32(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint32_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint32_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, (uint32_t)4U);
+      uint32_t i = bBits / 4U * 4U;
+      uint32_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, i, 4U);
       uint32_t bits_l32 = bits_c;
       const uint32_t *a_bits_l = table + bits_l32 * len1;
       memcpy(resM, (uint32_t *)a_bits_l, len1 * sizeof (uint32_t));
@@ -532,16 +532,16 @@ Hacl_GenericField32_exp_vartime(
     KRML_CHECK_SIZE(sizeof (uint32_t), len1);
     uint32_t *tmp0 = (uint32_t *)alloca(len1 * sizeof (uint32_t));
     memset(tmp0, 0U, len1 * sizeof (uint32_t));
-    for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < bBits / 4U; i++)
     {
       KRML_MAYBE_FOR4(i0,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint32_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u32(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, (uint32_t)4U);
+      uint32_t k2 = bBits - bBits % 4U - 4U * i - 4U;
+      uint32_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u32(bLen, b, k2, 4U);
       uint32_t bits_l32 = bits_l;
       const uint32_t *a_bits_l = table + bits_l32 * len1;
       memcpy(tmp0, (uint32_t *)a_bits_l, len1 * sizeof (uint32_t));
@@ -574,38 +574,33 @@ Hacl_GenericField32_inverse(
   KRML_CHECK_SIZE(sizeof (uint32_t), len1);
   uint32_t *n2 = (uint32_t *)alloca(len1 * sizeof (uint32_t));
   memset(n2, 0U, len1 * sizeof (uint32_t));
-  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32((uint32_t)0U, k1.n[0U], (uint32_t)2U, n2);
+  uint32_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u32(0U, k1.n[0U], 2U, n2);
   uint32_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint32_t *a1 = k1.n + (uint32_t)1U;
-    uint32_t *res1 = n2 + (uint32_t)1U;
+    uint32_t *a1 = k1.n + 1U;
+    uint32_t *res1 = n2 + 1U;
     uint32_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint32_t t1 = a1[(uint32_t)4U * i];
-      uint32_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i0);
-      uint32_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint32_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, (uint32_t)0U, res_i1);
-      uint32_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint32_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, (uint32_t)0U, res_i2);
-      uint32_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint32_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, (uint32_t)0U, res_i);
+      uint32_t t1 = a1[4U * i];
+      uint32_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i0);
+      uint32_t t10 = a1[4U * i + 1U];
+      uint32_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t10, 0U, res_i1);
+      uint32_t t11 = a1[4U * i + 2U];
+      uint32_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t11, 0U, res_i2);
+      uint32_t t12 = a1[4U * i + 3U];
+      uint32_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t12, 0U, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint32_t t1 = a1[i];
       uint32_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, (uint32_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u32(c, t1, 0U, res_i);
     }
     uint32_t c10 = c;
     c1 = c10;
@@ -614,7 +609,7 @@ Hacl_GenericField32_inverse(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
-  Hacl_GenericField32_exp_vartime(k, aM, k1.len * (uint32_t)32U, n2, aInvM);
+  KRML_MAYBE_UNUSED_VAR(c1);
+  Hacl_GenericField32_exp_vartime(k, aM, k1.len * 32U, n2, aInvM);
 }
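For larger exponents, the `exp_consttime` path above switches to a 4-bit fixed window: it precomputes `table[j] = aM^j` for j = 0..15 and, for each exponent window, copies the selected entry by scanning the whole table under an equality mask rather than indexing with the secret window value (the `exp_vartime` variant indexes directly, since its exponent is public). A hedged sketch of that masked scan, with an `eq_mask` helper mirroring the behaviour of `FStar_UInt32_eq_mask` (all-ones on equality, zero otherwise):

#include <stdint.h>

static uint32_t eq_mask_u32(uint32_t a, uint32_t b)
{
  uint32_t x = a ^ b;                         /* zero iff a == b */
  return ((x | (0U - x)) >> 31U) - 1U;        /* 0xFFFFFFFF iff a == b, else 0 */
}

/* Copy entry `idx` of a table with `nentries` rows of `len` words into `out`
   without ever using `idx` as an array index. */
static void ct_table_select_u32(uint32_t *out, const uint32_t *table,
                                uint32_t nentries, uint32_t len, uint32_t idx)
{
  for (uint32_t i = 0U; i < len; i++)
    out[i] = table[i];                        /* start from entry 0 */
  for (uint32_t j = 1U; j < nentries; j++)
  {
    uint32_t c = eq_mask_u32(idx, j);
    const uint32_t *entry = table + j * len;
    for (uint32_t i = 0U; i < len; i++)
      out[i] = (c & entry[i]) | (~c & out[i]);  /* same masked merge as the KRML_MAYBE_FOR15 bodies */
  }
}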
 
diff --git a/src/msvc/Hacl_GenericField64.c b/src/msvc/Hacl_GenericField64.c
index e8084285..04f54288 100644
--- a/src/msvc/Hacl_GenericField64.c
+++ b/src/msvc/Hacl_GenericField64.c
@@ -55,7 +55,7 @@ Check whether this library will work for a modulus `n`.
 bool Hacl_GenericField64_field_modulus_check(uint32_t len, uint64_t *n)
 {
   uint64_t m = Hacl_Bignum_Montgomery_bn_check_modulus_u64(len, n);
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -81,7 +81,7 @@ Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64
   uint64_t *r21 = r2;
   uint64_t *n11 = n1;
   memcpy(n11, n, len * sizeof (uint64_t));
-  uint32_t nBits = (uint32_t)64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
+  uint32_t nBits = 64U * (uint32_t)Hacl_Bignum_Lib_bn_get_top_index_u64(len, n);
   Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64(len, nBits, n, r21);
   uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
   Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64 res = { .len = len, .n = n11, .mu = mu, .r2 = r21 };
@@ -282,27 +282,27 @@ Hacl_GenericField64_exp_consttime(
   uint64_t *aMc = (uint64_t *)alloca(k1.len * sizeof (uint64_t));
   memset(aMc, 0U, k1.len * sizeof (uint64_t));
   memcpy(aMc, aM, k1.len * sizeof (uint64_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t *ctx = (uint64_t *)alloca((len1 + len1) * sizeof (uint64_t));
     memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint64_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t));
-    uint64_t sw = (uint64_t)0U;
+    uint64_t sw = 0ULL;
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits; i0++)
     {
-      uint32_t i1 = (bBits - i0 - (uint32_t)1U) / (uint32_t)64U;
-      uint32_t j = (bBits - i0 - (uint32_t)1U) % (uint32_t)64U;
+      uint32_t i1 = (bBits - i0 - 1U) / 64U;
+      uint32_t j = (bBits - i0 - 1U) % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
+      uint64_t bit = tmp >> j & 1ULL;
       uint64_t sw1 = bit ^ sw;
-      for (uint32_t i = (uint32_t)0U; i < len1; i++)
+      for (uint32_t i = 0U; i < len1; i++)
       {
-        uint64_t dummy = ((uint64_t)0U - sw1) & (resM[i] ^ aMc[i]);
+        uint64_t dummy = (0ULL - sw1) & (resM[i] ^ aMc[i]);
         resM[i] = resM[i] ^ dummy;
         aMc[i] = aMc[i] ^ dummy;
       }
@@ -313,9 +313,9 @@ Hacl_GenericField64_exp_consttime(
       sw = bit;
     }
     uint64_t sw0 = sw;
-    for (uint32_t i = (uint32_t)0U; i < len1; i++)
+    for (uint32_t i = 0U; i < len1; i++)
     {
-      uint64_t dummy = ((uint64_t)0U - sw0) & (resM[i] ^ aMc[i]);
+      uint64_t dummy = (0ULL - sw0) & (resM[i] ^ aMc[i]);
       resM[i] = resM[i] ^ dummy;
       aMc[i] = aMc[i] ^ dummy;
     }
@@ -323,22 +323,22 @@ Hacl_GenericField64_exp_consttime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 64U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t *ctx = (uint64_t *)alloca((len1 + len1) * sizeof (uint64_t));
     memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint64_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len1);
-    uint64_t *table = (uint64_t *)alloca((uint32_t)16U * len1 * sizeof (uint64_t));
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint64_t));
+    KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len1);
+    uint64_t *table = (uint64_t *)alloca(16U * len1 * sizeof (uint64_t));
+    memset(table, 0U, 16U * len1 * sizeof (uint64_t));
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t *tmp = (uint64_t *)alloca(len1 * sizeof (uint64_t));
     memset(tmp, 0U, len1 * sizeof (uint64_t));
@@ -349,29 +349,29 @@ Hacl_GenericField64_exp_consttime(
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint64_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint64_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint64_t *t11 = table + (i + 1U) * len1;
       uint64_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint64_t));
-      uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint64_t));
+      uint64_t *t2 = table + (2U * i + 2U) * len1;
       uint64_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint64_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint64_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i0 = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, (uint32_t)4U);
-      memcpy(resM, (uint64_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint64_t));
+      uint32_t i0 = bBits / 4U * 4U;
+      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i0, 4U);
+      memcpy(resM, (uint64_t *)(table + 0U * len1), len1 * sizeof (uint64_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + (uint32_t)1U));
-        const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint64_t c = FStar_UInt64_eq_mask(bits_c, (uint64_t)(i1 + 1U));
+        const uint64_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint64_t *os = resM;
           uint64_t x = (c & res_j[i]) | (~c & resM[i]);
@@ -387,24 +387,24 @@ Hacl_GenericField64_exp_consttime(
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t *tmp0 = (uint64_t *)alloca(len1 * sizeof (uint64_t));
     memset(tmp0, 0U, len1 * sizeof (uint64_t));
-    for (uint32_t i0 = (uint32_t)0U; i0 < bBits / (uint32_t)4U; i0++)
+    for (uint32_t i0 = 0U; i0 < bBits / 4U; i0++)
     {
       KRML_MAYBE_FOR4(i,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i0 - (uint32_t)4U;
-      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, (uint32_t)4U);
-      memcpy(tmp0, (uint64_t *)(table + (uint32_t)0U * len1), len1 * sizeof (uint64_t));
+      uint32_t k2 = bBits - bBits % 4U - 4U * i0 - 4U;
+      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, 4U);
+      memcpy(tmp0, (uint64_t *)(table + 0U * len1), len1 * sizeof (uint64_t));
       KRML_MAYBE_FOR15(i1,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
-        uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-        const uint64_t *res_j = table + (i1 + (uint32_t)1U) * len1;
-        for (uint32_t i = (uint32_t)0U; i < len1; i++)
+        0U,
+        15U,
+        1U,
+        uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+        const uint64_t *res_j = table + (i1 + 1U) * len1;
+        for (uint32_t i = 0U; i < len1; i++)
         {
           uint64_t *os = tmp0;
           uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
@@ -449,7 +449,7 @@ Hacl_GenericField64_exp_vartime(
   uint64_t *aMc = (uint64_t *)alloca(k1.len * sizeof (uint64_t));
   memset(aMc, 0U, k1.len * sizeof (uint64_t));
   memcpy(aMc, aM, k1.len * sizeof (uint64_t));
-  if (bBits < (uint32_t)200U)
+  if (bBits < 200U)
   {
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t *ctx = (uint64_t *)alloca((len1 + len1) * sizeof (uint64_t));
@@ -459,13 +459,13 @@ Hacl_GenericField64_exp_vartime(
     uint64_t *ctx_n = ctx;
     uint64_t *ctx_r2 = ctx + len1;
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n, k1.mu, ctx_r2, resM);
-    for (uint32_t i = (uint32_t)0U; i < bBits; i++)
+    for (uint32_t i = 0U; i < bBits; i++)
     {
-      uint32_t i1 = i / (uint32_t)64U;
-      uint32_t j = i % (uint32_t)64U;
+      uint32_t i1 = i / 64U;
+      uint32_t j = i % 64U;
       uint64_t tmp = b[i1];
-      uint64_t bit = tmp >> j & (uint64_t)1U;
-      if (!(bit == (uint64_t)0U))
+      uint64_t bit = tmp >> j & 1ULL;
+      if (!(bit == 0ULL))
       {
         uint64_t *ctx_n0 = ctx;
         Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n0, k1.mu, resM, aMc, resM);
@@ -477,22 +477,22 @@ Hacl_GenericField64_exp_vartime(
   else
   {
     uint32_t bLen;
-    if (bBits == (uint32_t)0U)
+    if (bBits == 0U)
     {
-      bLen = (uint32_t)1U;
+      bLen = 1U;
     }
     else
     {
-      bLen = (bBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+      bLen = (bBits - 1U) / 64U + 1U;
     }
     KRML_CHECK_SIZE(sizeof (uint64_t), len1 + len1);
     uint64_t *ctx = (uint64_t *)alloca((len1 + len1) * sizeof (uint64_t));
     memset(ctx, 0U, (len1 + len1) * sizeof (uint64_t));
     memcpy(ctx, k1.n, len1 * sizeof (uint64_t));
     memcpy(ctx + len1, k1.r2, len1 * sizeof (uint64_t));
-    KRML_CHECK_SIZE(sizeof (uint64_t), (uint32_t)16U * len1);
-    uint64_t *table = (uint64_t *)alloca((uint32_t)16U * len1 * sizeof (uint64_t));
-    memset(table, 0U, (uint32_t)16U * len1 * sizeof (uint64_t));
+    KRML_CHECK_SIZE(sizeof (uint64_t), 16U * len1);
+    uint64_t *table = (uint64_t *)alloca(16U * len1 * sizeof (uint64_t));
+    memset(table, 0U, 16U * len1 * sizeof (uint64_t));
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t *tmp = (uint64_t *)alloca(len1 * sizeof (uint64_t));
     memset(tmp, 0U, len1 * sizeof (uint64_t));
@@ -503,21 +503,21 @@ Hacl_GenericField64_exp_vartime(
     Hacl_Bignum_Montgomery_bn_from_mont_u64(len1, ctx_n0, k1.mu, ctx_r20, t0);
     memcpy(t1, aMc, len1 * sizeof (uint64_t));
     KRML_MAYBE_FOR7(i,
-      (uint32_t)0U,
-      (uint32_t)7U,
-      (uint32_t)1U,
-      uint64_t *t11 = table + (i + (uint32_t)1U) * len1;
+      0U,
+      7U,
+      1U,
+      uint64_t *t11 = table + (i + 1U) * len1;
       uint64_t *ctx_n1 = ctx;
       Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n1, k1.mu, t11, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * len1, tmp, len1 * sizeof (uint64_t));
-      uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * len1;
+      memcpy(table + (2U * i + 2U) * len1, tmp, len1 * sizeof (uint64_t));
+      uint64_t *t2 = table + (2U * i + 2U) * len1;
       uint64_t *ctx_n = ctx;
       Hacl_Bignum_Montgomery_bn_mont_mul_u64(len1, ctx_n, k1.mu, aMc, t2, tmp);
-      memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * len1, tmp, len1 * sizeof (uint64_t)););
-    if (bBits % (uint32_t)4U != (uint32_t)0U)
+      memcpy(table + (2U * i + 3U) * len1, tmp, len1 * sizeof (uint64_t)););
+    if (bBits % 4U != 0U)
     {
-      uint32_t i = bBits / (uint32_t)4U * (uint32_t)4U;
-      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, (uint32_t)4U);
+      uint32_t i = bBits / 4U * 4U;
+      uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, i, 4U);
       uint32_t bits_l32 = (uint32_t)bits_c;
       const uint64_t *a_bits_l = table + bits_l32 * len1;
       memcpy(resM, (uint64_t *)a_bits_l, len1 * sizeof (uint64_t));
@@ -531,16 +531,16 @@ Hacl_GenericField64_exp_vartime(
     KRML_CHECK_SIZE(sizeof (uint64_t), len1);
     uint64_t *tmp0 = (uint64_t *)alloca(len1 * sizeof (uint64_t));
     memset(tmp0, 0U, len1 * sizeof (uint64_t));
-    for (uint32_t i = (uint32_t)0U; i < bBits / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < bBits / 4U; i++)
     {
       KRML_MAYBE_FOR4(i0,
-        (uint32_t)0U,
-        (uint32_t)4U,
-        (uint32_t)1U,
+        0U,
+        4U,
+        1U,
         uint64_t *ctx_n = ctx;
         Hacl_Bignum_Montgomery_bn_mont_sqr_u64(len1, ctx_n, k1.mu, resM, resM););
-      uint32_t k2 = bBits - bBits % (uint32_t)4U - (uint32_t)4U * i - (uint32_t)4U;
-      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, (uint32_t)4U);
+      uint32_t k2 = bBits - bBits % 4U - 4U * i - 4U;
+      uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(bLen, b, k2, 4U);
       uint32_t bits_l32 = (uint32_t)bits_l;
       const uint64_t *a_bits_l = table + bits_l32 * len1;
       memcpy(tmp0, (uint64_t *)a_bits_l, len1 * sizeof (uint64_t));
@@ -573,38 +573,33 @@ Hacl_GenericField64_inverse(
   KRML_CHECK_SIZE(sizeof (uint64_t), len1);
   uint64_t *n2 = (uint64_t *)alloca(len1 * sizeof (uint64_t));
   memset(n2, 0U, len1 * sizeof (uint64_t));
-  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, k1.n[0U], (uint64_t)2U, n2);
+  uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(0ULL, k1.n[0U], 2ULL, n2);
   uint64_t c1;
-  if ((uint32_t)1U < len1)
+  if (1U < len1)
   {
-    uint64_t *a1 = k1.n + (uint32_t)1U;
-    uint64_t *res1 = n2 + (uint32_t)1U;
+    uint64_t *a1 = k1.n + 1U;
+    uint64_t *res1 = n2 + 1U;
     uint64_t c = c0;
-    for (uint32_t i = (uint32_t)0U; i < (len1 - (uint32_t)1U) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (len1 - 1U) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, 0ULL, res_i);
     }
-    for
-    (uint32_t
-      i = (len1 - (uint32_t)1U) / (uint32_t)4U * (uint32_t)4U;
-      i
-      < len1 - (uint32_t)1U;
-      i++)
+    for (uint32_t i = (len1 - 1U) / 4U * 4U; i < len1 - 1U; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c10 = c;
     c1 = c10;
@@ -613,7 +608,7 @@ Hacl_GenericField64_inverse(
   {
     c1 = c0;
   }
-  KRML_HOST_IGNORE(c1);
-  Hacl_GenericField64_exp_vartime(k, aM, k1.len * (uint32_t)64U, n2, aInvM);
+  KRML_MAYBE_UNUSED_VAR(c1);
+  Hacl_GenericField64_exp_vartime(k, aM, k1.len * 64U, n2, aInvM);
 }
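`Hacl_GenericField64_inverse` (like its 32-bit counterpart above) subtracts 2 from the modulus limb-by-limb with borrow propagation and then calls `exp_vartime`, i.e. it computes `aM^(n-2) mod n`. By Fermat's little theorem this is the inverse of `aM` exactly when `n` is prime; for a composite modulus the exponent would have to be φ(n) - 1 instead. A toy square-and-multiply check of the identity (plain C, not the library API):

#include <stdint.h>

/* a^e mod p by square-and-multiply, small enough that the 64-bit products cannot overflow. */
static uint32_t powmod(uint32_t a, uint32_t e, uint32_t p)
{
  uint64_t r = 1U;
  uint64_t base = a % p;
  while (e > 0U)
  {
    if (e & 1U)
      r = (r * base) % p;
    base = (base * base) % p;
    e >>= 1U;
  }
  return (uint32_t)r;
}

/* Example: p = 101 is prime, so powmod(7U, 99U, 101U) is 7^(p-2) mod p,
   and (7U * powmod(7U, 99U, 101U)) % 101U == 1U. */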
 
diff --git a/src/msvc/Hacl_HKDF.c b/src/msvc/Hacl_HKDF.c
index ce57b82c..f3b4d90f 100644
--- a/src/msvc/Hacl_HKDF.c
+++ b/src/msvc/Hacl_HKDF.c
@@ -45,39 +45,39 @@ Hacl_HKDF_expand_sha2_256(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -125,39 +125,39 @@ Hacl_HKDF_expand_sha2_384(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)48U;
+  uint32_t tlen = 48U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_384(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -205,39 +205,39 @@ Hacl_HKDF_expand_sha2_512(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_sha2_512(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -285,39 +285,39 @@ Hacl_HKDF_expand_blake2s_32(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2s_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
@@ -365,39 +365,39 @@ Hacl_HKDF_expand_blake2b_32(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_compute_blake2b_32(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
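Each `Hacl_HKDF_expand_*` variant above implements the same RFC 5869 Expand step: T(1) = HMAC(prk, info || 0x01), T(i) = HMAC(prk, T(i-1) || info || i), and the output keying material is the concatenation of the T(i) truncated to `len`. The single `text` buffer laid out as `tag || info || ctr` is exactly that concatenation; on the first round only the `info || ctr` suffix (`text0`) is hashed because there is no previous tag. A compact sketch of the recurrence with a generic HMAC callback (the callback type is illustrative, not a HACL* type):

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

typedef void (*hmac_fn)(uint8_t *tag, uint8_t *key, uint32_t keylen,
                        uint8_t *data, uint32_t datalen);

/* HKDF-Expand (RFC 5869) for a hash with tag length `tlen`; okm must hold `len` bytes.
   RFC 5869 caps len at 255 * tlen, so the one-byte counter cannot wrap. */
static void hkdf_expand(hmac_fn hmac, uint32_t tlen,
                        uint8_t *okm, uint8_t *prk, uint32_t prklen,
                        uint8_t *info, uint32_t infolen, uint32_t len)
{
  uint8_t *text = (uint8_t *)malloc(tlen + infolen + 1U);  /* T(i-1) || info || ctr */
  uint8_t *tag = (uint8_t *)malloc(tlen);
  memcpy(text + tlen, info, infolen);
  uint32_t produced = 0U;
  for (uint8_t ctr = 1U; produced < len; ctr++)
  {
    text[tlen + infolen] = ctr;
    if (ctr == 1U)
      hmac(tag, prk, prklen, text + tlen, infolen + 1U);     /* T(1) = HMAC(prk, info || 0x01) */
    else
      hmac(tag, prk, prklen, text, tlen + infolen + 1U);     /* T(i) = HMAC(prk, T(i-1) || info || i) */
    memcpy(text, tag, tlen);                                 /* feed T(i) into the next round */
    uint32_t chunk = (len - produced < tlen) ? (len - produced) : tlen;
    memcpy(okm + produced, tag, chunk);
    produced += chunk;
  }
  free(tag);
  free(text);
}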
diff --git a/src/msvc/Hacl_HKDF_Blake2b_256.c b/src/msvc/Hacl_HKDF_Blake2b_256.c
index 22b5549b..3280cb8f 100644
--- a/src/msvc/Hacl_HKDF_Blake2b_256.c
+++ b/src/msvc/Hacl_HKDF_Blake2b_256.c
@@ -45,47 +45,39 @@ Hacl_HKDF_Blake2b_256_expand_blake2b_256(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)64U;
+  uint32_t tlen = 64U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/msvc/Hacl_HKDF_Blake2s_128.c b/src/msvc/Hacl_HKDF_Blake2s_128.c
index 24d6cb3d..7007a4eb 100644
--- a/src/msvc/Hacl_HKDF_Blake2s_128.c
+++ b/src/msvc/Hacl_HKDF_Blake2s_128.c
@@ -45,47 +45,39 @@ Hacl_HKDF_Blake2s_128_expand_blake2s_128(
   uint32_t len
 )
 {
-  uint32_t tlen = (uint32_t)32U;
+  uint32_t tlen = 32U;
   uint32_t n = len / tlen;
   uint8_t *output = okm;
-  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + (uint32_t)1U);
-  uint8_t *text = (uint8_t *)alloca((tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
-  memset(text, 0U, (tlen + infolen + (uint32_t)1U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), tlen + infolen + 1U);
+  uint8_t *text = (uint8_t *)alloca((tlen + infolen + 1U) * sizeof (uint8_t));
+  memset(text, 0U, (tlen + infolen + 1U) * sizeof (uint8_t));
   uint8_t *text0 = text + tlen;
   uint8_t *tag = text;
   uint8_t *ctr = text + tlen + infolen;
   memcpy(text + tlen, info, infolen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
-    ctr[0U] = (uint8_t)(i + (uint32_t)1U);
-    if (i == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(i + 1U);
+    if (i == 0U)
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     memcpy(output + i * tlen, tag, tlen * sizeof (uint8_t));
   }
   if (n * tlen < len)
   {
-    ctr[0U] = (uint8_t)(n + (uint32_t)1U);
-    if (n == (uint32_t)0U)
+    ctr[0U] = (uint8_t)(n + 1U);
+    if (n == 0U)
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text0, infolen + 1U);
     }
     else
     {
-      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag,
-        prk,
-        prklen,
-        text,
-        tlen + infolen + (uint32_t)1U);
+      Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, prk, prklen, text, tlen + infolen + 1U);
     }
     uint8_t *block = okm + n * tlen;
     memcpy(block, tag, (len - n * tlen) * sizeof (uint8_t));
diff --git a/src/msvc/Hacl_HMAC.c b/src/msvc/Hacl_HMAC.c
index d46c4812..63ab2032 100644
--- a/src/msvc/Hacl_HMAC.c
+++ b/src/msvc/Hacl_HMAC.c
@@ -23,12 +23,13 @@
  */
 
 
-#include "Hacl_HMAC.h"
+#include "internal/Hacl_HMAC.h"
 
 #include "internal/Hacl_Krmllib.h"
 #include "internal/Hacl_Hash_SHA2.h"
 #include "internal/Hacl_Hash_SHA1.h"
-#include "internal/Hacl_Hash_Blake2.h"
+#include "internal/Hacl_Hash_Blake2s.h"
+#include "internal/Hacl_Hash_Blake2b.h"
 
 /**
 Write the HMAC-SHA-1 MAC of a message (`data`) by using a key (`key`) into `dst`.
@@ -37,7 +38,7 @@ The key can be any length and will be hashed if it is longer and padded if it is
 `dst` must point to 20 bytes of memory.
 */
 void
-Hacl_HMAC_legacy_compute_sha1(
+Hacl_HMAC_compute_sha1(
   uint8_t *dst,
   uint8_t *key,
   uint32_t key_len,
@@ -45,68 +46,63 @@ Hacl_HMAC_legacy_compute_sha1(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)20U;
+    ite = 20U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Hash_SHA1_legacy_hash(key, key_len, nkey);
+    Hacl_Hash_SHA1_hash_oneshot(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
-  uint32_t
-  s[5U] =
-    {
-      (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-      (uint32_t)0xc3d2e1f0U
-    };
+  uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)0U, ipad, (uint32_t)64U);
+    Hacl_Hash_SHA1_update_last(s, 0ULL, ipad, 64U);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -118,25 +114,21 @@ Hacl_HMAC_legacy_compute_sha1(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_Hash_SHA1_legacy_update_multi(s, ipad, (uint32_t)1U);
-    Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-    Hacl_Hash_SHA1_legacy_update_last(s,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-      rem,
-      rem_len);
+    Hacl_Hash_SHA1_update_multi(s, ipad, 1U);
+    Hacl_Hash_SHA1_update_multi(s, full_blocks, n_blocks);
+    Hacl_Hash_SHA1_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
   }
-  Hacl_Hash_Core_SHA1_legacy_finish(s, dst1);
+  Hacl_Hash_SHA1_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_Hash_Core_SHA1_legacy_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)20U / block_len;
-  uint32_t rem0 = (uint32_t)20U % block_len;
+  Hacl_Hash_SHA1_init(s);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 20U / block_len;
+  uint32_t rem0 = 20U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)20U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 20U - n_blocks_ * block_len });
   }
   else
   {
@@ -147,13 +139,10 @@ Hacl_HMAC_legacy_compute_sha1(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_Hash_SHA1_legacy_update_multi(s, opad, (uint32_t)1U);
-  Hacl_Hash_SHA1_legacy_update_multi(s, full_blocks, n_blocks);
-  Hacl_Hash_SHA1_legacy_update_last(s,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
-    rem,
-    rem_len);
-  Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
+  Hacl_Hash_SHA1_update_multi(s, opad, 1U);
+  Hacl_Hash_SHA1_update_multi(s, full_blocks, n_blocks);
+  Hacl_Hash_SHA1_update_last(s, (uint64_t)64U + (uint64_t)full_blocks_len, rem, rem_len);
+  Hacl_Hash_SHA1_finish(s, dst);
 }
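For orientation only (not part of the patch): a minimal caller-side sketch of the renamed one-shot API above, assuming the public `Hacl_HMAC.h` prototype shown in this diff. The 20-byte tag and the any-length key follow the doc comment; the buffers and the `example_` helper name are illustrative placeholders, not library code.

#include <stdint.h>
#include "Hacl_HMAC.h"

/* Sketch: compute HMAC-SHA-1 of a short message with the renamed entry
   point. The tag is 20 bytes (SHA-1 digest size); the key may be any
   length and is hashed or padded internally, as the doc comment states. */
static void example_hmac_sha1(void)
{
  uint8_t key[16] = { 0x0b };          /* illustrative key bytes */
  uint8_t msg[]   = "sample message";  /* illustrative data */
  uint8_t tag[20];                     /* output MAC, 20 bytes */
  Hacl_HMAC_compute_sha1(tag, key, (uint32_t)sizeof key,
                         msg, (uint32_t)(sizeof msg - 1U));
}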
 
 /**
@@ -171,74 +160,71 @@ Hacl_HMAC_compute_sha2_256(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Streaming_SHA2_hash_256(key, key_len, nkey);
+    Hacl_Hash_SHA2_hash_256(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = st;
-    uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
+    uint32_t x = Hacl_Hash_SHA2_h256[i];
     os[i] = x;);
   uint32_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)0U + (uint64_t)(uint32_t)64U,
-      (uint32_t)64U,
-      ipad,
-      s);
+    Hacl_Hash_SHA2_sha256_update_last(0ULL + (uint64_t)64U, 64U, ipad, s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -250,27 +236,24 @@ Hacl_HMAC_compute_sha2_256(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, ipad, s);
-    Hacl_SHA2_Scalar32_sha256_update_nblocks(n_blocks * (uint32_t)64U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
-      + (uint64_t)full_blocks_len
-      + (uint64_t)rem_len,
+    Hacl_Hash_SHA2_sha256_update_nblocks(64U, ipad, s);
+    Hacl_Hash_SHA2_sha256_update_nblocks(n_blocks * 64U, full_blocks, s);
+    Hacl_Hash_SHA2_sha256_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len,
       rem_len,
       rem,
       s);
   }
-  Hacl_SHA2_Scalar32_sha256_finish(s, dst1);
+  Hacl_Hash_SHA2_sha256_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_SHA2_Scalar32_sha256_init(s);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Hash_SHA2_sha256_init(s);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -281,15 +264,13 @@ Hacl_HMAC_compute_sha2_256(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, opad, s);
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(n_blocks * (uint32_t)64U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha256_update_last((uint64_t)(uint32_t)64U
-    + (uint64_t)full_blocks_len
-    + (uint64_t)rem_len,
+  Hacl_Hash_SHA2_sha256_update_nblocks(64U, opad, s);
+  Hacl_Hash_SHA2_sha256_update_nblocks(n_blocks * 64U, full_blocks, s);
+  Hacl_Hash_SHA2_sha256_update_last((uint64_t)64U + (uint64_t)full_blocks_len + (uint64_t)rem_len,
     rem_len,
     rem,
     s);
-  Hacl_SHA2_Scalar32_sha256_finish(s, dst);
+  Hacl_Hash_SHA2_sha256_finish(s, dst);
 }
 
 /**
@@ -307,75 +288,75 @@ Hacl_HMAC_compute_sha2_384(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)48U;
+    ite = 48U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Streaming_SHA2_hash_384(key, key_len, nkey);
+    Hacl_Hash_SHA2_hash_384(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
-    uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
+    uint64_t x = Hacl_Hash_SHA2_h384[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -387,27 +368,26 @@ Hacl_HMAC_compute_sha2_384(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_Hash_SHA2_sha384_update_nblocks(128U, ipad, s);
+    Hacl_Hash_SHA2_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
       rem,
       s);
   }
-  Hacl_SHA2_Scalar32_sha384_finish(s, dst1);
+  Hacl_Hash_SHA2_sha384_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_SHA2_Scalar32_sha384_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)48U / block_len;
-  uint32_t rem0 = (uint32_t)48U % block_len;
+  Hacl_Hash_SHA2_sha384_init(s);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 48U / block_len;
+  uint32_t rem0 = 48U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)48U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 48U - n_blocks_ * block_len });
   }
   else
   {
@@ -418,15 +398,15 @@ Hacl_HMAC_compute_sha2_384(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha384_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_Hash_SHA2_sha384_update_nblocks(128U, opad, s);
+  Hacl_Hash_SHA2_sha384_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
     rem,
     s);
-  Hacl_SHA2_Scalar32_sha384_finish(s, dst);
+  Hacl_Hash_SHA2_sha384_finish(s, dst);
 }
 
 /**
@@ -444,75 +424,75 @@ Hacl_HMAC_compute_sha2_512(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Streaming_SHA2_hash_512(key, key_len, nkey);
+    Hacl_Hash_SHA2_hash_512(nkey, key, key_len);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t st[8U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = st;
-    uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
+    uint64_t x = Hacl_Hash_SHA2_h512[i];
     os[i] = x;);
   uint64_t *s = st;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-        FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U)),
-      (uint32_t)128U,
+    Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(0ULL),
+        FStar_UInt128_uint64_to_uint128((uint64_t)128U)),
+      128U,
       ipad,
       s);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -524,27 +504,26 @@ Hacl_HMAC_compute_sha2_512(
     uint32_t full_blocks_len = n_blocks * block_len;
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, ipad, s);
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-    Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    Hacl_Hash_SHA2_sha512_update_nblocks(128U, ipad, s);
+    Hacl_Hash_SHA2_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+    Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
           FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
         FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
       rem_len,
       rem,
       s);
   }
-  Hacl_SHA2_Scalar32_sha512_finish(s, dst1);
+  Hacl_Hash_SHA2_sha512_finish(s, dst1);
   uint8_t *hash1 = ipad;
-  Hacl_SHA2_Scalar32_sha512_init(s);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Hash_SHA2_sha512_init(s);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -555,15 +534,15 @@ Hacl_HMAC_compute_sha2_512(
   uint32_t full_blocks_len = n_blocks * block_len;
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
-  Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, opad, s);
-  Hacl_SHA2_Scalar32_sha512_update_nblocks(n_blocks * (uint32_t)128U, full_blocks, s);
-  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+  Hacl_Hash_SHA2_sha512_update_nblocks(128U, opad, s);
+  Hacl_Hash_SHA2_sha512_update_nblocks(n_blocks * 128U, full_blocks, s);
+  Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       FStar_UInt128_uint64_to_uint128((uint64_t)rem_len)),
     rem_len,
     rem,
     s);
-  Hacl_SHA2_Scalar32_sha512_finish(s, dst);
+  Hacl_Hash_SHA2_sha512_finish(s, dst);
 }
 
 /**
@@ -581,66 +560,66 @@ Hacl_HMAC_compute_blake2s_32(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2s_32_blake2s((uint32_t)32U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Hash_Blake2s_hash_with_key(nkey, 32U, key, key_len, NULL, 0U);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint32_t s[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_init(s, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Hash_Blake2s_init(s, 0U, 32U);
   uint32_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last((uint32_t)64U, wv, s0, (uint64_t)0U, (uint32_t)64U, ipad);
+    Hacl_Hash_Blake2s_update_last(64U, wv, s0, 0ULL, 64U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -653,34 +632,33 @@ Hacl_HMAC_compute_blake2s_32(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint32_t wv[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, ipad, (uint32_t)1U);
+    Hacl_Hash_Blake2s_update_multi(64U, wv, s0, 0ULL, ipad, 1U);
     uint32_t wv0[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+    Hacl_Hash_Blake2s_update_multi(n_blocks * 64U,
       wv0,
       s0,
       (uint64_t)block_len,
       full_blocks,
       n_blocks);
     uint32_t wv1[16U] = { 0U };
-    Hacl_Blake2s_32_blake2s_update_last(rem_len,
+    Hacl_Hash_Blake2s_update_last(rem_len,
       wv1,
       s0,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+      (uint64_t)64U + (uint64_t)full_blocks_len,
       rem_len,
       rem);
   }
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst1, s0);
+  Hacl_Hash_Blake2s_finish(32U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2s_32_blake2s_init(s0, (uint32_t)0U, (uint32_t)32U);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Hash_Blake2s_init(s0, 0U, 32U);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -692,22 +670,22 @@ Hacl_HMAC_compute_blake2s_32(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint32_t wv[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, opad, (uint32_t)1U);
+  Hacl_Hash_Blake2s_update_multi(64U, wv, s0, 0ULL, opad, 1U);
   uint32_t wv0[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_multi(n_blocks * (uint32_t)64U,
+  Hacl_Hash_Blake2s_update_multi(n_blocks * 64U,
     wv0,
     s0,
     (uint64_t)block_len,
     full_blocks,
     n_blocks);
   uint32_t wv1[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_update_last(rem_len,
+  Hacl_Hash_Blake2s_update_last(rem_len,
     wv1,
     s0,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+    (uint64_t)64U + (uint64_t)full_blocks_len,
     rem_len,
     rem);
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, s0);
+  Hacl_Hash_Blake2s_finish(32U, dst, s0);
 }
 
 /**
@@ -725,71 +703,66 @@ Hacl_HMAC_compute_blake2b_32(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2b_32_blake2b((uint32_t)64U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Hash_Blake2b_hash_with_key(nkey, 64U, key, key_len, NULL, 0U);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   uint64_t s[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_init(s, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Hash_Blake2b_init(s, 0U, 64U);
   uint64_t *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last((uint32_t)128U,
-      wv,
-      s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      (uint32_t)128U,
-      ipad);
+    Hacl_Hash_Blake2b_update_last(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), 128U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -802,40 +775,34 @@ Hacl_HMAC_compute_blake2b_32(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     uint64_t wv[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
-      wv,
-      s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      ipad,
-      (uint32_t)1U);
+    Hacl_Hash_Blake2b_update_multi(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), ipad, 1U);
     uint64_t wv0[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+    Hacl_Hash_Blake2b_update_multi(n_blocks * 128U,
       wv0,
       s0,
       FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
       full_blocks,
       n_blocks);
     uint64_t wv1[16U] = { 0U };
-    Hacl_Blake2b_32_blake2b_update_last(rem_len,
+    Hacl_Hash_Blake2b_update_last(rem_len,
       wv1,
       s0,
-      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       rem_len,
       rem);
   }
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst1, s0);
+  Hacl_Hash_Blake2b_finish(64U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2b_32_blake2b_init(s0, (uint32_t)0U, (uint32_t)64U);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Hash_Blake2b_init(s0, 0U, 64U);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -847,27 +814,22 @@ Hacl_HMAC_compute_blake2b_32(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   uint64_t wv[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
-    wv,
-    s0,
-    FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-    opad,
-    (uint32_t)1U);
+  Hacl_Hash_Blake2b_update_multi(128U, wv, s0, FStar_UInt128_uint64_to_uint128(0ULL), opad, 1U);
   uint64_t wv0[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_multi(n_blocks * (uint32_t)128U,
+  Hacl_Hash_Blake2b_update_multi(n_blocks * 128U,
     wv0,
     s0,
     FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
     full_blocks,
     n_blocks);
   uint64_t wv1[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_update_last(rem_len,
+  Hacl_Hash_Blake2b_update_last(rem_len,
     wv1,
     s0,
-    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
       FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
     rem_len,
     rem);
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, s0);
+  Hacl_Hash_Blake2b_finish(64U, dst, s0);
 }
 
diff --git a/src/msvc/Hacl_HMAC_Blake2b_256.c b/src/msvc/Hacl_HMAC_Blake2b_256.c
index 20b050de..cd16e65e 100644
--- a/src/msvc/Hacl_HMAC_Blake2b_256.c
+++ b/src/msvc/Hacl_HMAC_Blake2b_256.c
@@ -26,7 +26,8 @@
 #include "Hacl_HMAC_Blake2b_256.h"
 
 #include "internal/Hacl_Krmllib.h"
-#include "internal/Hacl_Hash_Blake2.h"
+#include "internal/Hacl_Hash_Blake2b_Simd256.h"
+#include "internal/Hacl_HMAC.h"
 
 /**
 Write the HMAC-BLAKE2b MAC of a message (`data`) by using a key (`key`) into `dst`.
@@ -43,71 +44,71 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)128U;
+  uint32_t l = 128U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)128U)
+  if (key_len <= 128U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)128U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 128U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2b_256_blake2b((uint32_t)64U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Hash_Blake2b_Simd256_hash_with_key(nkey, 64U, key, key_len, NULL, 0U);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 s[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_init(s, (uint32_t)0U, (uint32_t)64U);
+  Hacl_Hash_Blake2b_Simd256_init(s, 0U, 64U);
   Lib_IntVector_Intrinsics_vec256 *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_last((uint32_t)128U,
+    Hacl_Hash_Blake2b_Simd256_update_last(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
-      (uint32_t)128U,
+      FStar_UInt128_uint64_to_uint128(0ULL),
+      128U,
       ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)128U;
+    uint32_t block_len = 128U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -120,40 +121,39 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
+    Hacl_Hash_Blake2b_Simd256_update_multi(128U,
       wv,
       s0,
-      FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+      FStar_UInt128_uint64_to_uint128(0ULL),
       ipad,
-      (uint32_t)1U);
+      1U);
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_multi(n_blocks * (uint32_t)128U,
+    Hacl_Hash_Blake2b_Simd256_update_multi(n_blocks * 128U,
       wv0,
       s0,
       FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
       full_blocks,
       n_blocks);
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv1[4U] KRML_POST_ALIGN(32) = { 0U };
-    Hacl_Blake2b_256_blake2b_update_last(rem_len,
+    Hacl_Hash_Blake2b_Simd256_update_last(rem_len,
       wv1,
       s0,
-      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+      FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
         FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
       rem_len,
       rem);
   }
-  Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst1, s0);
+  Hacl_Hash_Blake2b_Simd256_finish(64U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2b_256_blake2b_init(s0, (uint32_t)0U, (uint32_t)64U);
-  uint32_t block_len = (uint32_t)128U;
-  uint32_t n_blocks0 = (uint32_t)64U / block_len;
-  uint32_t rem0 = (uint32_t)64U % block_len;
+  Hacl_Hash_Blake2b_Simd256_init(s0, 0U, 64U);
+  uint32_t block_len = 128U;
+  uint32_t n_blocks0 = 64U / block_len;
+  uint32_t rem0 = 64U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)64U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 64U - n_blocks_ * block_len });
   }
   else
   {
@@ -165,27 +165,27 @@ Hacl_HMAC_Blake2b_256_compute_blake2b_256(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
+  Hacl_Hash_Blake2b_Simd256_update_multi(128U,
     wv,
     s0,
-    FStar_UInt128_uint64_to_uint128((uint64_t)0U),
+    FStar_UInt128_uint64_to_uint128(0ULL),
     opad,
-    (uint32_t)1U);
+    1U);
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_update_multi(n_blocks * (uint32_t)128U,
+  Hacl_Hash_Blake2b_Simd256_update_multi(n_blocks * 128U,
     wv0,
     s0,
     FStar_UInt128_uint64_to_uint128((uint64_t)block_len),
     full_blocks,
     n_blocks);
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv1[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_update_last(rem_len,
+  Hacl_Hash_Blake2b_Simd256_update_last(rem_len,
     wv1,
     s0,
-    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U),
+    FStar_UInt128_add(FStar_UInt128_uint64_to_uint128((uint64_t)128U),
       FStar_UInt128_uint64_to_uint128((uint64_t)full_blocks_len)),
     rem_len,
     rem);
-  Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst, s0);
+  Hacl_Hash_Blake2b_Simd256_finish(64U, dst, s0);
 }
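Again for orientation: a hedged usage sketch of the vectorized one-shot wrapper above, assuming a build where the 256-bit intrinsics are available (this is the Simd256 code path). The 64-byte tag length mirrors the `..._finish(64U, dst, s0)` call in the body; the helper name is illustrative.

#include <stdint.h>
#include "Hacl_HMAC_Blake2b_256.h"

/* Sketch: one-shot HMAC-BLAKE2b via the 256-bit vectorized path.
   Tag length 64 matches the finish call above; key and data may be any length. */
static void example_hmac_blake2b_256(uint8_t *key, uint32_t key_len,
                                     uint8_t *data, uint32_t data_len)
{
  uint8_t tag[64];
  Hacl_HMAC_Blake2b_256_compute_blake2b_256(tag, key, key_len, data, data_len);
}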
 
diff --git a/src/msvc/Hacl_HMAC_Blake2s_128.c b/src/msvc/Hacl_HMAC_Blake2s_128.c
index 144722e4..bf2033a8 100644
--- a/src/msvc/Hacl_HMAC_Blake2s_128.c
+++ b/src/msvc/Hacl_HMAC_Blake2s_128.c
@@ -25,7 +25,8 @@
 
 #include "Hacl_HMAC_Blake2s_128.h"
 
-#include "internal/Hacl_Hash_Blake2.h"
+#include "internal/Hacl_Hash_Blake2s_Simd128.h"
+#include "internal/Hacl_HMAC.h"
 
 /**
 Write the HMAC-BLAKE2s MAC of a message (`data`) by using a key (`key`) into `dst`.
@@ -42,66 +43,66 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
   uint32_t data_len
 )
 {
-  uint32_t l = (uint32_t)64U;
+  uint32_t l = 64U;
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *key_block = (uint8_t *)alloca(l * sizeof (uint8_t));
   memset(key_block, 0U, l * sizeof (uint8_t));
   uint8_t *nkey = key_block;
   uint32_t ite;
-  if (key_len <= (uint32_t)64U)
+  if (key_len <= 64U)
   {
     ite = key_len;
   }
   else
   {
-    ite = (uint32_t)32U;
+    ite = 32U;
   }
   uint8_t *zeroes = key_block + ite;
-  KRML_HOST_IGNORE(zeroes);
-  if (key_len <= (uint32_t)64U)
+  KRML_MAYBE_UNUSED_VAR(zeroes);
+  if (key_len <= 64U)
   {
     memcpy(nkey, key, key_len * sizeof (uint8_t));
   }
   else
   {
-    Hacl_Blake2s_128_blake2s((uint32_t)32U, nkey, key_len, key, (uint32_t)0U, NULL);
+    Hacl_Hash_Blake2s_Simd128_hash_with_key(nkey, 32U, key, key_len, NULL, 0U);
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *ipad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(ipad, (uint8_t)0x36U, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(ipad, 0x36U, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = ipad[i];
     uint8_t yi = key_block[i];
-    ipad[i] = xi ^ yi;
+    ipad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_CHECK_SIZE(sizeof (uint8_t), l);
   uint8_t *opad = (uint8_t *)alloca(l * sizeof (uint8_t));
-  memset(opad, (uint8_t)0x5cU, l * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < l; i++)
+  memset(opad, 0x5cU, l * sizeof (uint8_t));
+  for (uint32_t i = 0U; i < l; i++)
   {
     uint8_t xi = opad[i];
     uint8_t yi = key_block[i];
-    opad[i] = xi ^ yi;
+    opad[i] = (uint32_t)xi ^ (uint32_t)yi;
   }
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 s[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_init(s, (uint32_t)0U, (uint32_t)32U);
+  Hacl_Hash_Blake2s_Simd128_init(s, 0U, 32U);
   Lib_IntVector_Intrinsics_vec128 *s0 = s;
   uint8_t *dst1 = ipad;
-  if (data_len == (uint32_t)0U)
+  if (data_len == 0U)
   {
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_last((uint32_t)64U, wv, s0, (uint64_t)0U, (uint32_t)64U, ipad);
+    Hacl_Hash_Blake2s_Simd128_update_last(64U, wv, s0, 0ULL, 64U, ipad);
   }
   else
   {
-    uint32_t block_len = (uint32_t)64U;
+    uint32_t block_len = 64U;
     uint32_t n_blocks0 = data_len / block_len;
     uint32_t rem0 = data_len % block_len;
     K___uint32_t_uint32_t scrut;
-    if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+    if (n_blocks0 > 0U && rem0 == 0U)
     {
-      uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
+      uint32_t n_blocks_ = n_blocks0 - 1U;
       scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = data_len - n_blocks_ * block_len });
     }
     else
@@ -114,34 +115,33 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
     uint8_t *full_blocks = data;
     uint8_t *rem = data + full_blocks_len;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, ipad, (uint32_t)1U);
+    Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, s0, 0ULL, ipad, 1U);
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_multi(n_blocks * (uint32_t)64U,
+    Hacl_Hash_Blake2s_Simd128_update_multi(n_blocks * 64U,
       wv0,
       s0,
       (uint64_t)block_len,
       full_blocks,
       n_blocks);
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv1[4U] KRML_POST_ALIGN(16) = { 0U };
-    Hacl_Blake2s_128_blake2s_update_last(rem_len,
+    Hacl_Hash_Blake2s_Simd128_update_last(rem_len,
       wv1,
       s0,
-      (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+      (uint64_t)64U + (uint64_t)full_blocks_len,
       rem_len,
       rem);
   }
-  Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst1, s0);
+  Hacl_Hash_Blake2s_Simd128_finish(32U, dst1, s0);
   uint8_t *hash1 = ipad;
-  Hacl_Blake2s_128_blake2s_init(s0, (uint32_t)0U, (uint32_t)32U);
-  uint32_t block_len = (uint32_t)64U;
-  uint32_t n_blocks0 = (uint32_t)32U / block_len;
-  uint32_t rem0 = (uint32_t)32U % block_len;
+  Hacl_Hash_Blake2s_Simd128_init(s0, 0U, 32U);
+  uint32_t block_len = 64U;
+  uint32_t n_blocks0 = 32U / block_len;
+  uint32_t rem0 = 32U % block_len;
   K___uint32_t_uint32_t scrut;
-  if (n_blocks0 > (uint32_t)0U && rem0 == (uint32_t)0U)
+  if (n_blocks0 > 0U && rem0 == 0U)
   {
-    uint32_t n_blocks_ = n_blocks0 - (uint32_t)1U;
-    scrut =
-      ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = (uint32_t)32U - n_blocks_ * block_len });
+    uint32_t n_blocks_ = n_blocks0 - 1U;
+    scrut = ((K___uint32_t_uint32_t){ .fst = n_blocks_, .snd = 32U - n_blocks_ * block_len });
   }
   else
   {
@@ -153,21 +153,21 @@ Hacl_HMAC_Blake2s_128_compute_blake2s_128(
   uint8_t *full_blocks = hash1;
   uint8_t *rem = hash1 + full_blocks_len;
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, s0, (uint64_t)0U, opad, (uint32_t)1U);
+  Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, s0, 0ULL, opad, 1U);
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_update_multi(n_blocks * (uint32_t)64U,
+  Hacl_Hash_Blake2s_Simd128_update_multi(n_blocks * 64U,
     wv0,
     s0,
     (uint64_t)block_len,
     full_blocks,
     n_blocks);
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv1[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_update_last(rem_len,
+  Hacl_Hash_Blake2s_Simd128_update_last(rem_len,
     wv1,
     s0,
-    (uint64_t)(uint32_t)64U + (uint64_t)full_blocks_len,
+    (uint64_t)64U + (uint64_t)full_blocks_len,
     rem_len,
     rem);
-  Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst, s0);
+  Hacl_Hash_Blake2s_Simd128_finish(32U, dst, s0);
 }
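Likewise, a sketch for the 128-bit vectorized BLAKE2s wrapper (illustrative only; the 32-byte tag mirrors the `..._finish(32U, dst, s0)` call above, and the helper name is not part of the library):

#include <stdint.h>
#include "Hacl_HMAC_Blake2s_128.h"

/* Sketch: one-shot HMAC-BLAKE2s via the 128-bit vectorized path.
   The 32-byte tag matches the finish call above. */
static void example_hmac_blake2s_128(uint8_t *key, uint32_t key_len,
                                     uint8_t *data, uint32_t data_len)
{
  uint8_t tag[32];
  Hacl_HMAC_Blake2s_128_compute_blake2s_128(tag, key, key_len, data, data_len);
}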
 
diff --git a/src/msvc/Hacl_HMAC_DRBG.c b/src/msvc/Hacl_HMAC_DRBG.c
index b3acf354..8f754afb 100644
--- a/src/msvc/Hacl_HMAC_DRBG.c
+++ b/src/msvc/Hacl_HMAC_DRBG.c
@@ -25,15 +25,15 @@
 
 #include "Hacl_HMAC_DRBG.h"
 
-uint32_t Hacl_HMAC_DRBG_reseed_interval = (uint32_t)1024U;
+uint32_t Hacl_HMAC_DRBG_reseed_interval = 1024U;
 
-uint32_t Hacl_HMAC_DRBG_max_output_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_output_length = 65536U;
 
-uint32_t Hacl_HMAC_DRBG_max_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_length = 65536U;
 
-uint32_t Hacl_HMAC_DRBG_max_personalization_string_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_personalization_string_length = 65536U;
 
-uint32_t Hacl_HMAC_DRBG_max_additional_input_length = (uint32_t)65536U;
+uint32_t Hacl_HMAC_DRBG_max_additional_input_length = 65536U;
 
 /**
 Return the minimal entropy input length of the desired hash function.
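A small illustrative use of this helper, assuming the usual `Hacl_HMAC_DRBG.h` and spec-definition headers; per the switch below it returns 16 for SHA-1 and 32 for the SHA-2 variants, and the example function name is hypothetical:

#include <stdint.h>
#include "Hacl_HMAC_DRBG.h"

/* Sketch: query the minimum entropy-input length before seeding a DRBG. */
static uint32_t example_min_entropy_len(void)
{
  return Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_SHA2_256); /* 32 */
}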
@@ -46,19 +46,19 @@ uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     default:
       {
@@ -71,8 +71,8 @@ uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a)
 bool
 Hacl_HMAC_DRBG_uu___is_State(Spec_Hash_Definitions_hash_alg a, Hacl_HMAC_DRBG_state projectee)
 {
-  KRML_HOST_IGNORE(a);
-  KRML_HOST_IGNORE(projectee);
+  KRML_MAYBE_UNUSED_VAR(a);
+  KRML_MAYBE_UNUSED_VAR(projectee);
   return true;
 }
 
@@ -92,25 +92,25 @@ Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
         k = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
         k = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
         k = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
         k = buf;
         break;
       }
@@ -125,25 +125,25 @@ Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA1:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)20U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(20U, sizeof (uint8_t));
         v = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
         v = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)48U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(48U, sizeof (uint8_t));
         v = buf;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
+        uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
         v = buf;
         break;
       }
@@ -154,7 +154,7 @@ Hacl_HMAC_DRBG_state Hacl_HMAC_DRBG_create_in(Spec_Hash_Definitions_hash_alg a)
       }
   }
   uint32_t *ctr = (uint32_t *)KRML_HOST_MALLOC(sizeof (uint32_t));
-  ctr[0U] = (uint32_t)1U;
+  ctr[0U] = 1U;
   return ((Hacl_HMAC_DRBG_state){ .k = k, .v = v, .reseed_counter = ctr });
 }
 
@@ -203,45 +203,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)20U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)20U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 20U * sizeof (uint8_t));
+        memset(v, 1U, 20U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 21U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 20U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)21U,
+          memcpy(input0 + 21U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[20U] = (uint8_t)0U;
-        Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-        Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[20U] = 0U;
+        Hacl_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+        Hacl_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+        memcpy(k, k_, 20U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)21U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 21U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 20U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)21U,
+            memcpy(input + 21U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[20U] = (uint8_t)1U;
-          Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-          Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+          input[20U] = 1U;
+          Hacl_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+          Hacl_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+          memcpy(k, k_0, 20U * sizeof (uint8_t));
         }
         break;
       }
@@ -264,45 +262,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)32U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)32U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 32U * sizeof (uint8_t));
+        memset(v, 1U, 32U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 33U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 32U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)33U,
+          memcpy(input0 + 33U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[32U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-        Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[32U] = 0U;
+        Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+        Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+        memcpy(k, k_, 32U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)33U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 33U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 32U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)33U,
+            memcpy(input + 33U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[32U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-          Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+          input[32U] = 1U;
+          Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+          Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+          memcpy(k, k_0, 32U * sizeof (uint8_t));
         }
         break;
       }
@@ -325,45 +321,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)48U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)48U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 48U * sizeof (uint8_t));
+        memset(v, 1U, 48U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 49U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 48U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)49U,
+          memcpy(input0 + 49U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[48U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-        Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[48U] = 0U;
+        Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+        Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+        memcpy(k, k_, 48U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)49U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 49U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 48U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)49U,
+            memcpy(input + 49U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[48U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-          Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+          input[48U] = 1U;
+          Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+          Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+          memcpy(k, k_0, 48U * sizeof (uint8_t));
         }
         break;
       }
@@ -386,45 +380,43 @@ Hacl_HMAC_DRBG_instantiate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        memset(k, 0U, (uint32_t)64U * sizeof (uint8_t));
-        memset(v, (uint8_t)1U, (uint32_t)64U * sizeof (uint8_t));
-        ctr[0U] = (uint32_t)1U;
-        uint32_t
-        input_len = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+        memset(k, 0U, 64U * sizeof (uint8_t));
+        memset(v, 1U, 64U * sizeof (uint8_t));
+        ctr[0U] = 1U;
+        uint32_t input_len = 65U + entropy_input_len + nonce_len + personalization_string_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        memcpy(k_, v, 64U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          memcpy(input0 + (uint32_t)65U,
+          memcpy(input0 + 65U,
             seed_material,
             (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
         }
-        input0[64U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-        Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+        input0[64U] = 0U;
+        Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+        Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+        memcpy(k, k_, 64U * sizeof (uint8_t));
+        if (entropy_input_len + nonce_len + personalization_string_len != 0U)
         {
-          uint32_t
-          input_len0 = (uint32_t)65U + entropy_input_len + nonce_len + personalization_string_len;
+          uint32_t input_len0 = 65U + entropy_input_len + nonce_len + personalization_string_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-          if (entropy_input_len + nonce_len + personalization_string_len != (uint32_t)0U)
+          memcpy(k_0, v, 64U * sizeof (uint8_t));
+          if (entropy_input_len + nonce_len + personalization_string_len != 0U)
           {
-            memcpy(input + (uint32_t)65U,
+            memcpy(input + 65U,
               seed_material,
               (entropy_input_len + nonce_len + personalization_string_len) * sizeof (uint8_t));
           }
-          input[64U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-          Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+          input[64U] = 1U;
+          Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+          Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+          memcpy(k, k_0, 64U * sizeof (uint8_t));
         }
         break;
       }
@@ -474,42 +466,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)21U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 21U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 20U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)21U,
+          memcpy(input0 + 21U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[20U] = (uint8_t)0U;
-        Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-        Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[20U] = 0U;
+        Hacl_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+        Hacl_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+        memcpy(k, k_, 20U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)21U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 21U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 20U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)21U,
+            memcpy(input + 21U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[20U] = (uint8_t)1U;
-          Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-          Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+          input[20U] = 1U;
+          Hacl_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+          Hacl_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+          memcpy(k, k_0, 20U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     case Spec_Hash_Definitions_SHA2_256:
@@ -528,42 +520,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)33U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 33U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 32U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)33U,
+          memcpy(input0 + 33U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[32U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-        Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[32U] = 0U;
+        Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+        Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+        memcpy(k, k_, 32U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)33U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 33U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 32U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)33U,
+            memcpy(input + 33U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[32U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-          Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+          input[32U] = 1U;
+          Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+          Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+          memcpy(k, k_0, 32U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
@@ -582,42 +574,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)49U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 49U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 48U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)49U,
+          memcpy(input0 + 49U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[48U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-        Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[48U] = 0U;
+        Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+        Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+        memcpy(k, k_, 48U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)49U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 49U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 48U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)49U,
+            memcpy(input + 49U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[48U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-          Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+          input[48U] = 1U;
+          Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+          Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+          memcpy(k, k_0, 48U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
@@ -636,42 +628,42 @@ Hacl_HMAC_DRBG_reseed(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        uint32_t input_len = (uint32_t)65U + entropy_input_len + additional_input_input_len;
+        uint32_t input_len = 65U + entropy_input_len + additional_input_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        memcpy(k_, v, 64U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)65U,
+          memcpy(input0 + 65U,
             seed_material,
             (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
         }
-        input0[64U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-        Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-        if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+        input0[64U] = 0U;
+        Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+        Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+        memcpy(k, k_, 64U * sizeof (uint8_t));
+        if (entropy_input_len + additional_input_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)65U + entropy_input_len + additional_input_input_len;
+          uint32_t input_len0 = 65U + entropy_input_len + additional_input_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-          if (entropy_input_len + additional_input_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 64U * sizeof (uint8_t));
+          if (entropy_input_len + additional_input_input_len != 0U)
           {
-            memcpy(input + (uint32_t)65U,
+            memcpy(input + 65U,
               seed_material,
               (entropy_input_len + additional_input_input_len) * sizeof (uint8_t));
           }
-          input[64U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-          Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+          input[64U] = 1U;
+          Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+          Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+          memcpy(k, k_0, 64U * sizeof (uint8_t));
         }
-        ctr[0U] = (uint32_t)1U;
+        ctr[0U] = 1U;
         break;
       }
     default:
@@ -713,93 +705,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)21U + additional_input_len;
+          uint32_t input_len = 21U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 20U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)21U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[20U] = (uint8_t)0U;
-          Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-          Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[20U] = 0U;
+          Hacl_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+          Hacl_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+          memcpy(k, k_, 20U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+            uint32_t input_len0 = 21U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 20U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)21U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[20U] = (uint8_t)1U;
-            Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-            Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-            memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+            input[20U] = 1U;
+            Hacl_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+            Hacl_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+            memcpy(k, k_0, 20U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)20U;
+        uint32_t max = n / 20U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_legacy_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(out + i * (uint32_t)20U, v, (uint32_t)20U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha1(v, k, 20U, v, 20U);
+          memcpy(out + i * 20U, v, 20U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)20U < n)
+        if (max * 20U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)20U;
-          Hacl_HMAC_legacy_compute_sha1(v, k, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(block, v, (n - max * (uint32_t)20U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 20U;
+          Hacl_HMAC_compute_sha1(v, k, 20U, v, 20U);
+          memcpy(block, v, (n - max * 20U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)21U + additional_input_len;
+        uint32_t input_len = 21U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)20U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 20U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)21U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 21U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[20U] = (uint8_t)0U;
-        Hacl_HMAC_legacy_compute_sha1(k_, k, (uint32_t)20U, input0, input_len);
-        Hacl_HMAC_legacy_compute_sha1(v, k_, (uint32_t)20U, v, (uint32_t)20U);
-        memcpy(k, k_, (uint32_t)20U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[20U] = 0U;
+        Hacl_HMAC_compute_sha1(k_, k, 20U, input0, input_len);
+        Hacl_HMAC_compute_sha1(v, k_, 20U, v, 20U);
+        memcpy(k, k_, 20U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)21U + additional_input_len;
+          uint32_t input_len0 = 21U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)20U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 20U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)21U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 21U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[20U] = (uint8_t)1U;
-          Hacl_HMAC_legacy_compute_sha1(k_0, k, (uint32_t)20U, input, input_len0);
-          Hacl_HMAC_legacy_compute_sha1(v, k_0, (uint32_t)20U, v, (uint32_t)20U);
-          memcpy(k, k_0, (uint32_t)20U * sizeof (uint8_t));
+          input[20U] = 1U;
+          Hacl_HMAC_compute_sha1(k_0, k, 20U, input, input_len0);
+          Hacl_HMAC_compute_sha1(v, k_0, 20U, v, 20U);
+          memcpy(k, k_0, 20U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     case Spec_Hash_Definitions_SHA2_256:
@@ -811,93 +797,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)33U + additional_input_len;
+          uint32_t input_len = 33U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 32U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)33U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[32U] = (uint8_t)0U;
-          Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-          Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[32U] = 0U;
+          Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+          Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+          memcpy(k, k_, 32U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+            uint32_t input_len0 = 33U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 32U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)33U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[32U] = (uint8_t)1U;
-            Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-            Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-            memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+            input[32U] = 1U;
+            Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+            Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+            memcpy(k, k_0, 32U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)32U;
+        uint32_t max = n / 32U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(out + i * (uint32_t)32U, v, (uint32_t)32U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+          memcpy(out + i * 32U, v, 32U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)32U < n)
+        if (max * 32U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)32U;
-          Hacl_HMAC_compute_sha2_256(v, k, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(block, v, (n - max * (uint32_t)32U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 32U;
+          Hacl_HMAC_compute_sha2_256(v, k, 32U, v, 32U);
+          memcpy(block, v, (n - max * 32U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)33U + additional_input_len;
+        uint32_t input_len = 33U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)32U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 32U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)33U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 33U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[32U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_256(k_, k, (uint32_t)32U, input0, input_len);
-        Hacl_HMAC_compute_sha2_256(v, k_, (uint32_t)32U, v, (uint32_t)32U);
-        memcpy(k, k_, (uint32_t)32U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[32U] = 0U;
+        Hacl_HMAC_compute_sha2_256(k_, k, 32U, input0, input_len);
+        Hacl_HMAC_compute_sha2_256(v, k_, 32U, v, 32U);
+        memcpy(k, k_, 32U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)33U + additional_input_len;
+          uint32_t input_len0 = 33U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)32U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 32U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)33U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 33U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[32U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_256(k_0, k, (uint32_t)32U, input, input_len0);
-          Hacl_HMAC_compute_sha2_256(v, k_0, (uint32_t)32U, v, (uint32_t)32U);
-          memcpy(k, k_0, (uint32_t)32U * sizeof (uint8_t));
+          input[32U] = 1U;
+          Hacl_HMAC_compute_sha2_256(k_0, k, 32U, input, input_len0);
+          Hacl_HMAC_compute_sha2_256(v, k_0, 32U, v, 32U);
+          memcpy(k, k_0, 32U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     case Spec_Hash_Definitions_SHA2_384:
@@ -909,93 +889,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)49U + additional_input_len;
+          uint32_t input_len = 49U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 48U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)49U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[48U] = (uint8_t)0U;
-          Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-          Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[48U] = 0U;
+          Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+          Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+          memcpy(k, k_, 48U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+            uint32_t input_len0 = 49U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 48U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)49U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[48U] = (uint8_t)1U;
-            Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-            Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-            memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+            input[48U] = 1U;
+            Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+            Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+            memcpy(k, k_0, 48U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)48U;
+        uint32_t max = n / 48U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(out + i * (uint32_t)48U, v, (uint32_t)48U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+          memcpy(out + i * 48U, v, 48U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)48U < n)
+        if (max * 48U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)48U;
-          Hacl_HMAC_compute_sha2_384(v, k, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(block, v, (n - max * (uint32_t)48U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 48U;
+          Hacl_HMAC_compute_sha2_384(v, k, 48U, v, 48U);
+          memcpy(block, v, (n - max * 48U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)49U + additional_input_len;
+        uint32_t input_len = 49U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)48U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 48U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)49U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 49U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[48U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_384(k_, k, (uint32_t)48U, input0, input_len);
-        Hacl_HMAC_compute_sha2_384(v, k_, (uint32_t)48U, v, (uint32_t)48U);
-        memcpy(k, k_, (uint32_t)48U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[48U] = 0U;
+        Hacl_HMAC_compute_sha2_384(k_, k, 48U, input0, input_len);
+        Hacl_HMAC_compute_sha2_384(v, k_, 48U, v, 48U);
+        memcpy(k, k_, 48U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)49U + additional_input_len;
+          uint32_t input_len0 = 49U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)48U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 48U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)49U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 49U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[48U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_384(k_0, k, (uint32_t)48U, input, input_len0);
-          Hacl_HMAC_compute_sha2_384(v, k_0, (uint32_t)48U, v, (uint32_t)48U);
-          memcpy(k, k_0, (uint32_t)48U * sizeof (uint8_t));
+          input[48U] = 1U;
+          Hacl_HMAC_compute_sha2_384(k_0, k, 48U, input, input_len0);
+          Hacl_HMAC_compute_sha2_384(v, k_0, 48U, v, 48U);
+          memcpy(k, k_0, 48U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     case Spec_Hash_Definitions_SHA2_512:
@@ -1007,93 +981,87 @@ Hacl_HMAC_DRBG_generate(
         uint8_t *k = st.k;
         uint8_t *v = st.v;
         uint32_t *ctr = st.reseed_counter;
-        if (additional_input_len > (uint32_t)0U)
+        if (additional_input_len > 0U)
         {
-          uint32_t input_len = (uint32_t)65U + additional_input_len;
+          uint32_t input_len = 65U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
           uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
           memset(input0, 0U, input_len * sizeof (uint8_t));
           uint8_t *k_ = input0;
-          memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_, v, 64U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input0 + (uint32_t)65U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input0[64U] = (uint8_t)0U;
-          Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-          Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          input0[64U] = 0U;
+          Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+          Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+          memcpy(k, k_, 64U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+            uint32_t input_len0 = 65U + additional_input_len;
             KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
             uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
             memset(input, 0U, input_len0 * sizeof (uint8_t));
             uint8_t *k_0 = input;
-            memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-            if (additional_input_len != (uint32_t)0U)
+            memcpy(k_0, v, 64U * sizeof (uint8_t));
+            if (additional_input_len != 0U)
             {
-              memcpy(input + (uint32_t)65U,
-                additional_input,
-                additional_input_len * sizeof (uint8_t));
+              memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
             }
-            input[64U] = (uint8_t)1U;
-            Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-            Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-            memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+            input[64U] = 1U;
+            Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+            Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+            memcpy(k, k_0, 64U * sizeof (uint8_t));
           }
         }
         uint8_t *output1 = output;
-        uint32_t max = n / (uint32_t)64U;
+        uint32_t max = n / 64U;
         uint8_t *out = output1;
-        for (uint32_t i = (uint32_t)0U; i < max; i++)
+        for (uint32_t i = 0U; i < max; i++)
         {
-          Hacl_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(out + i * (uint32_t)64U, v, (uint32_t)64U * sizeof (uint8_t));
+          Hacl_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+          memcpy(out + i * 64U, v, 64U * sizeof (uint8_t));
         }
-        if (max * (uint32_t)64U < n)
+        if (max * 64U < n)
         {
-          uint8_t *block = output1 + max * (uint32_t)64U;
-          Hacl_HMAC_compute_sha2_512(v, k, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(block, v, (n - max * (uint32_t)64U) * sizeof (uint8_t));
+          uint8_t *block = output1 + max * 64U;
+          Hacl_HMAC_compute_sha2_512(v, k, 64U, v, 64U);
+          memcpy(block, v, (n - max * 64U) * sizeof (uint8_t));
         }
-        uint32_t input_len = (uint32_t)65U + additional_input_len;
+        uint32_t input_len = 65U + additional_input_len;
         KRML_CHECK_SIZE(sizeof (uint8_t), input_len);
         uint8_t *input0 = (uint8_t *)alloca(input_len * sizeof (uint8_t));
         memset(input0, 0U, input_len * sizeof (uint8_t));
         uint8_t *k_ = input0;
-        memcpy(k_, v, (uint32_t)64U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        memcpy(k_, v, 64U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          memcpy(input0 + (uint32_t)65U, additional_input, additional_input_len * sizeof (uint8_t));
+          memcpy(input0 + 65U, additional_input, additional_input_len * sizeof (uint8_t));
         }
-        input0[64U] = (uint8_t)0U;
-        Hacl_HMAC_compute_sha2_512(k_, k, (uint32_t)64U, input0, input_len);
-        Hacl_HMAC_compute_sha2_512(v, k_, (uint32_t)64U, v, (uint32_t)64U);
-        memcpy(k, k_, (uint32_t)64U * sizeof (uint8_t));
-        if (additional_input_len != (uint32_t)0U)
+        input0[64U] = 0U;
+        Hacl_HMAC_compute_sha2_512(k_, k, 64U, input0, input_len);
+        Hacl_HMAC_compute_sha2_512(v, k_, 64U, v, 64U);
+        memcpy(k, k_, 64U * sizeof (uint8_t));
+        if (additional_input_len != 0U)
         {
-          uint32_t input_len0 = (uint32_t)65U + additional_input_len;
+          uint32_t input_len0 = 65U + additional_input_len;
           KRML_CHECK_SIZE(sizeof (uint8_t), input_len0);
           uint8_t *input = (uint8_t *)alloca(input_len0 * sizeof (uint8_t));
           memset(input, 0U, input_len0 * sizeof (uint8_t));
           uint8_t *k_0 = input;
-          memcpy(k_0, v, (uint32_t)64U * sizeof (uint8_t));
-          if (additional_input_len != (uint32_t)0U)
+          memcpy(k_0, v, 64U * sizeof (uint8_t));
+          if (additional_input_len != 0U)
           {
-            memcpy(input + (uint32_t)65U,
-              additional_input,
-              additional_input_len * sizeof (uint8_t));
+            memcpy(input + 65U, additional_input, additional_input_len * sizeof (uint8_t));
           }
-          input[64U] = (uint8_t)1U;
-          Hacl_HMAC_compute_sha2_512(k_0, k, (uint32_t)64U, input, input_len0);
-          Hacl_HMAC_compute_sha2_512(v, k_0, (uint32_t)64U, v, (uint32_t)64U);
-          memcpy(k, k_0, (uint32_t)64U * sizeof (uint8_t));
+          input[64U] = 1U;
+          Hacl_HMAC_compute_sha2_512(k_0, k, 64U, input, input_len0);
+          Hacl_HMAC_compute_sha2_512(v, k_0, 64U, v, 64U);
+          memcpy(k, k_0, 64U * sizeof (uint8_t));
         }
         uint32_t old_ctr = ctr[0U];
-        ctr[0U] = old_ctr + (uint32_t)1U;
+        ctr[0U] = old_ctr + 1U;
         return true;
       }
     default:
@@ -1106,7 +1074,7 @@ Hacl_HMAC_DRBG_generate(
 
 void Hacl_HMAC_DRBG_free(Spec_Hash_Definitions_hash_alg uu___, Hacl_HMAC_DRBG_state s)
 {
-  KRML_HOST_IGNORE(uu___);
+  KRML_MAYBE_UNUSED_VAR(uu___);
   uint8_t *k = s.k;
   uint8_t *v = s.v;
   uint32_t *ctr = s.reseed_counter;
diff --git a/src/msvc/Hacl_HPKE_Curve51_CP128_SHA256.c b/src/msvc/Hacl_HPKE_Curve51_CP128_SHA256.c
index f05fd2bd..ccb6c4f6 100644
--- a/src/msvc/Hacl_HPKE_Curve51_CP128_SHA256.c
+++ b/src/msvc/Hacl_HPKE_Curve51_CP128_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP128_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP128_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve51_CP128_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve51_CP128_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve51_CP128_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve51_CP128_SHA512.c b/src/msvc/Hacl_HPKE_Curve51_CP128_SHA512.c
index 5e5c7788..3691181f 100644
--- a/src/msvc/Hacl_HPKE_Curve51_CP128_SHA512.c
+++ b/src/msvc/Hacl_HPKE_Curve51_CP128_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP128_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP128_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve51_CP128_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP128_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve51_CP128_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve51_CP128_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP128_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
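Note (not part of the patch): the hunks above replace the old Hacl_Chacha20Poly1305_128_aead_encrypt/decrypt calls with the new Hacl_AEAD_Chacha20Poly1305_Simd128 entry points, whose parameter order can be read off the calls in the sealBase/openBase hunks: encrypt takes (cipher, tag, plain, plainlen, aad, aadlen, key, nonce) and decrypt takes (plain, cipher, cipherlen, aad, aadlen, key, nonce, tag) and returns 0 on a successful tag check. The following is a minimal caller sketch under those assumptions; the header name is inferred from the function prefix and the helper name is hypothetical, so adjust both to your checkout.

/*
 * Minimal sketch of the detached AEAD API used above.  Assumptions:
 * the header is Hacl_AEAD_Chacha20Poly1305_Simd128.h and the parameter
 * order matches the calls in the hunks above (ciphertext/tag first,
 * key/nonce last).  Key is 32 bytes, nonce 12 bytes, tag 16 bytes.
 */
#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"

int seal_and_open_once(uint8_t *key /* 32 bytes */, uint8_t *nonce /* 12 bytes */)
{
  uint8_t plain[16U] = "example payload";
  uint8_t aad[4U] = { 0x48U, 0x50U, 0x4bU, 0x45U };   /* arbitrary AAD */
  uint8_t cipher[16U] = { 0U };
  uint8_t tag[16U] = { 0U };                          /* Poly1305 tag */
  uint8_t decrypted[16U] = { 0U };

  /* Detached encryption: ciphertext and tag are written separately,
     mirroring how sealBase splits o_ct into cipher || tag above. */
  Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher, tag,
    plain, 16U, aad, 4U, key, nonce);

  /* Detached decryption returns 0 when the tag verifies, matching the
     res1 == 0U check in openBase above. */
  uint32_t res = Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(decrypted,
    cipher, 16U, aad, 4U, key, nonce, tag);
  return res == 0U ? 0 : -1;
}

The same layout explains the pointer arithmetic in the hunks: the tag lives directly after the ciphertext, so sealing writes to o_ct and o_ct + plainlen, and opening reads ct with ctlen - 16U bytes of ciphertext followed by the 16-byte tag.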
diff --git a/src/msvc/Hacl_HPKE_Curve51_CP256_SHA256.c b/src/msvc/Hacl_HPKE_Curve51_CP256_SHA256.c
index 879d3a76..7c9cfcc6 100644
--- a/src/msvc/Hacl_HPKE_Curve51_CP256_SHA256.c
+++ b/src/msvc/Hacl_HPKE_Curve51_CP256_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP256_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP256_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve51_CP256_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve51_CP256_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve51_CP256_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
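Note (not part of the patch): every sealBase/openBase body above derives the per-message nonce the same way, by serializing the 64-bit sequence number big-endian into the last 8 bytes of a 12-byte block, XORing it into the context base nonce, and then bumping the sequence only while it is below 2^64 - 1 (the 18446744073709551615ULL guard). The sketch below restates that logic in plain C; the function names are hypothetical and store64_be is spelled out byte by byte so the snippet stays self-contained.

/*
 * Sketch of the nonce/sequence handling repeated in the hunks above.
 * Not part of the generated code; helper names are illustrative only.
 */
#include <stdint.h>

/* nonce = base_nonce XOR (0x00000000 || be64(seq)) */
static void hpke_compute_nonce(uint8_t nonce[12U],
  const uint8_t base_nonce[12U], uint64_t seq)
{
  uint8_t enc[12U] = { 0U };
  /* store64_be(enc + 4U, seq), written out explicitly */
  for (uint32_t i = 0U; i < 8U; i++)
  {
    enc[4U + i] = (uint8_t)(seq >> (56U - 8U * i));
  }
  for (uint32_t i = 0U; i < 12U; i++)
  {
    nonce[i] = (uint8_t)((uint32_t)enc[i] ^ (uint32_t)base_nonce[i]);
  }
}

/* Returns 0 and increments *seq on success, 1 if the counter would wrap,
   matching the res1 computation after each encrypt/decrypt above. */
static uint32_t hpke_increment_seq(uint64_t *seq)
{
  if (*seq == 18446744073709551615ULL)
  {
    return 1U;
  }
  *seq = *seq + 1ULL;
  return 0U;
}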
diff --git a/src/msvc/Hacl_HPKE_Curve51_CP256_SHA512.c b/src/msvc/Hacl_HPKE_Curve51_CP256_SHA512.c
index 0ecc22be..ff5bccc0 100644
--- a/src/msvc/Hacl_HPKE_Curve51_CP256_SHA512.c
+++ b/src/msvc/Hacl_HPKE_Curve51_CP256_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP256_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP256_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve51_CP256_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve51_CP256_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve51_CP256_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve51_CP256_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP256_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve51_CP32_SHA256.c b/src/msvc/Hacl_HPKE_Curve51_CP32_SHA256.c
index ed3f7eed..c91ed755 100644
--- a/src/msvc/Hacl_HPKE_Curve51_CP32_SHA256.c
+++ b/src/msvc/Hacl_HPKE_Curve51_CP32_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP32_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP32_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,43 +552,45 @@ Hacl_HPKE_Curve51_CP32_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +608,7 @@ Hacl_HPKE_Curve51_CP32_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +619,44 @@ Hacl_HPKE_Curve51_CP32_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve51_CP32_SHA512.c b/src/msvc/Hacl_HPKE_Curve51_CP32_SHA512.c
index 1c4b30e4..e97ee4cd 100644
--- a/src/msvc/Hacl_HPKE_Curve51_CP32_SHA512.c
+++ b/src/msvc/Hacl_HPKE_Curve51_CP32_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_51_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve51_CP32_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_51_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_51_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_51_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve51_CP32_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,43 +552,45 @@ Hacl_HPKE_Curve51_CP32_SHA512_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +608,7 @@ Hacl_HPKE_Curve51_CP32_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +619,44 @@ Hacl_HPKE_Curve51_CP32_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve51_CP32_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve64_CP128_SHA256.c b/src/msvc/Hacl_HPKE_Curve64_CP128_SHA256.c
index 70b41c45..24414b4b 100644
--- a/src/msvc/Hacl_HPKE_Curve64_CP128_SHA256.c
+++ b/src/msvc/Hacl_HPKE_Curve64_CP128_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP128_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP128_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve64_CP128_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve64_CP128_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve64_CP128_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
diff --git a/src/msvc/Hacl_HPKE_Curve64_CP128_SHA512.c b/src/msvc/Hacl_HPKE_Curve64_CP128_SHA512.c
index 6a4f3d01..81131914 100644
--- a/src/msvc/Hacl_HPKE_Curve64_CP128_SHA512.c
+++ b/src/msvc/Hacl_HPKE_Curve64_CP128_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP128_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP128_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve64_CP128_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP128_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve64_CP128_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve64_CP128_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP128_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
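/*
 * Illustrative sketch of the AEAD API migration shown in the hunks above:
 * Hacl_Chacha20Poly1305_128_aead_encrypt(key, nonce, aadlen, aad, plainlen,
 * plain, cipher, tag) is replaced by Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt,
 * which takes the outputs first (ciphertext, then the 16-byte tag), followed by
 * the message and its length, the AAD and its length, and finally the key and
 * nonce.  The include path and the wrapper name seal_simd128 below are
 * assumptions for illustration only, not part of this patch.
 */
#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"

/* Seal `plainlen` bytes of `plain` into `out`, which must hold plainlen + 16
 * bytes: the ciphertext is written first and the Poly1305 tag is appended,
 * matching the sealBase layout above. */
static void
seal_simd128(uint8_t *out, uint8_t *plain, uint32_t plainlen,
             uint8_t *aad, uint32_t aadlen,
             uint8_t *key /* 32 bytes */, uint8_t *nonce /* 12 bytes */)
{
  uint8_t *cipher = out;
  uint8_t *tag = out + plainlen;
  /* Old argument order: (key, nonce, aadlen, aad, plainlen, plain, cipher, tag). */
  Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher, tag, plain, plainlen,
    aad, aadlen, key, nonce);
}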
 
diff --git a/src/msvc/Hacl_HPKE_Curve64_CP256_SHA256.c b/src/msvc/Hacl_HPKE_Curve64_CP256_SHA256.c
index 725bb6cd..df58f0e1 100644
--- a/src/msvc/Hacl_HPKE_Curve64_CP256_SHA256.c
+++ b/src/msvc/Hacl_HPKE_Curve64_CP256_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP256_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP256_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve64_CP256_SHA256_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve64_CP256_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve64_CP256_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
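/*
 * Illustrative sketch of the corresponding open-side migration used above:
 * Hacl_Chacha20Poly1305_256_aead_decrypt(key, nonce, aadlen, aad, mlen, out,
 * cipher, tag) becomes Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(out, cipher,
 * mlen, aad, aadlen, key, nonce, tag), returning 0 on success and a nonzero
 * value when the tag does not verify.  The include path and the wrapper name
 * open_simd256 below are assumptions for illustration only, not part of this
 * patch.
 */
#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"

/* Open `ctlen` bytes of ciphertext||tag from `ct` into `out`, which must hold
 * ctlen - 16 bytes, matching the openBase layout above.  Returns 0 on success,
 * nonzero if authentication fails. */
static uint32_t
open_simd256(uint8_t *out, uint8_t *ct, uint32_t ctlen,
             uint8_t *aad, uint32_t aadlen,
             uint8_t *key /* 32 bytes */, uint8_t *nonce /* 12 bytes */)
{
  uint8_t *cipher = ct;
  uint8_t *tag = ct + ctlen - 16U;
  /* Old argument order: (key, nonce, aadlen, aad, ctlen - 16, out, ct, tag). */
  return Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(out, cipher, ctlen - 16U,
    aad, aadlen, key, nonce, tag);
}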
 
diff --git a/src/msvc/Hacl_HPKE_Curve64_CP256_SHA512.c b/src/msvc/Hacl_HPKE_Curve64_CP256_SHA512.c
index e7be8835..8052db8f 100644
--- a/src/msvc/Hacl_HPKE_Curve64_CP256_SHA512.c
+++ b/src/msvc/Hacl_HPKE_Curve64_CP256_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP256_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP256_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -608,43 +553,45 @@ Hacl_HPKE_Curve64_CP256_SHA512_sealBase(
     };
   uint32_t
   res = Hacl_HPKE_Curve64_CP256_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
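
The hunk above (sealBase) replaces the old Hacl_Chacha20Poly1305_256_aead_encrypt call with the renamed Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt, whose argument order puts the two outputs (ciphertext and 16-byte tag) first, then the message, then the AAD, and finally the key and nonce. A minimal call-shape sketch, assuming the header that matches the new function name; the wrapper and buffer names are illustrative, not part of the patch:

    #include <stdint.h>
    #include "Hacl_AEAD_Chacha20Poly1305_Simd256.h" /* assumed header for the renamed API */

    /* Illustrative sketch: mirrors the argument order used in the hunk above. */
    static void seal_example(uint8_t *ct, uint8_t *tag,
                             uint8_t *msg, uint32_t msg_len,
                             uint8_t *aad, uint32_t aad_len,
                             uint8_t key[32U], uint8_t nonce[12U])
    {
      /* outputs first (ct, tag), then input, then AAD, then key/nonce */
      Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(ct, tag, msg, msg_len, aad, aad_len, key, nonce);
    }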
@@ -662,7 +609,7 @@ Hacl_HPKE_Curve64_CP256_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -673,42 +620,44 @@ Hacl_HPKE_Curve64_CP256_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP256_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
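
openBase, in the hunk above, makes the matching switch to Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt: the plaintext output comes first, the ciphertext is passed without its trailing 16-byte tag, and the key, nonce, and tag pointer come last; a return value of 0U means the tag verified (the caller checks res1 == 0U). A minimal usage sketch with illustrative names, under the same header assumption as above:

    #include <stdint.h>
    #include "Hacl_AEAD_Chacha20Poly1305_Simd256.h" /* assumed header for the renamed API */

    /* Illustrative sketch: splits ct into body + 16-byte tag and forwards the
       arguments in the order used in the hunk above. Returns 0U on success,
       nonzero if the tag does not verify. */
    static uint32_t open_example(uint8_t *pt,
                                 uint8_t *ct, uint32_t ct_len, /* ct_len >= 16U */
                                 uint8_t *aad, uint32_t aad_len,
                                 uint8_t key[32U], uint8_t nonce[12U])
    {
      uint8_t *cipher = ct;
      uint8_t *tag = ct + ct_len - 16U;
      return Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(pt, cipher, ct_len - 16U,
        aad, aad_len, key, nonce, tag);
    }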
 
diff --git a/src/msvc/Hacl_HPKE_Curve64_CP32_SHA256.c b/src/msvc/Hacl_HPKE_Curve64_CP32_SHA256.c
index 92672abe..5c6ef179 100644
--- a/src/msvc/Hacl_HPKE_Curve64_CP32_SHA256.c
+++ b/src/msvc/Hacl_HPKE_Curve64_CP32_SHA256.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)1U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 1U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP32_SHA256_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)1U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 1U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp3,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp3, (uint16_t)32U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP32_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,43 +552,45 @@ Hacl_HPKE_Curve64_CP32_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +608,7 @@ Hacl_HPKE_Curve64_CP32_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +619,44 @@ Hacl_HPKE_Curve64_CP32_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
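[Editor's note, not part of the patch: the seal/open hunks above replace the old `Hacl_Chacha20Poly1305_32_aead_encrypt`/`_aead_decrypt` entry points with the new `Hacl_AEAD_Chacha20Poly1305_*` API, which takes the ciphertext and the 16-byte tag as separate buffers and reorders the arguments to (outputs, input + length, aad + length, key, nonce). The sketch below is a minimal standalone caller written against the signatures exactly as they appear in the diff; the header name, key/nonce/aad values, and buffer sizes are illustrative assumptions, not something this patch introduces.]

    #include <stdint.h>
    #include <stdio.h>
    #include "Hacl_AEAD_Chacha20Poly1305.h" /* assumed header for the new AEAD API */

    int main(void)
    {
      uint8_t key[32U] = { 0U };   /* illustrative key; HPKE derives this via setupBaseS/R */
      uint8_t nonce[12U] = { 0U }; /* illustrative nonce */
      uint8_t aad[4U] = { 1U, 2U, 3U, 4U };
      uint8_t plain[24U] = "new AEAD calling order";
      uint8_t cipher[24U] = { 0U };
      uint8_t tag[16U] = { 0U };   /* tag now lives in its own 16-byte buffer */
      uint8_t decrypted[24U] = { 0U };

      /* outputs first (cipher, tag), then plaintext + length, aad + length, key, nonce */
      Hacl_AEAD_Chacha20Poly1305_encrypt(cipher, tag, plain, 24U, aad, 4U, key, nonce);

      /* decrypt returns 0U when the tag verifies, 1U otherwise (mirroring the res1 check above) */
      uint32_t res = Hacl_AEAD_Chacha20Poly1305_decrypt(decrypted, cipher, 24U, aad, 4U, key, nonce, tag);
      if (res == 0U)
      {
        printf("tag verified, plaintext recovered\n");
      }
      return (int)res;
    }

[This is why the HPKE callers above now compute `cipher = o_ct` / `tag = o_ct + plainlen` on seal and `cipher = ct` / `tag = ct + ctlen - 16U` on open before the call, instead of passing `ct + ctlen - 16U` inline as the old API did.]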
 
diff --git a/src/msvc/Hacl_HPKE_Curve64_CP32_SHA512.c b/src/msvc/Hacl_HPKE_Curve64_CP32_SHA512.c
index 5ad7e761..cb24c3ac 100644
--- a/src/msvc/Hacl_HPKE_Curve64_CP32_SHA512.c
+++ b/src/msvc/Hacl_HPKE_Curve64_CP32_SHA512.c
@@ -40,262 +40,234 @@ Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS(
   uint8_t o_shared[32U] = { 0U };
   uint8_t *o_pkE1 = o_pkE;
   Hacl_Curve25519_64_secret_to_public(o_pkE1, skE);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint32_t res0;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t o_dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(o_dh, skE, pkR);
-    uint8_t res2 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res2 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(o_dh[i], zeros[i]);
-      res2 = uu____0 & res2;
+      res2 = (uint32_t)uu____0 & (uint32_t)res2;
     }
     uint8_t z = res2;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res20 = res;
     uint8_t o_kemcontext[64U] = { 0U };
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)32U;
+      memcpy(o_kemcontext, o_pkE, 32U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 32U;
       uint8_t *o_pkR = o_pkRm;
-      memcpy(o_pkR, pkR, (uint32_t)32U * sizeof (uint8_t));
+      memcpy(o_pkR, pkR, 32U * sizeof (uint8_t));
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____1 = suite_id_kem;
-      uu____1[0U] = (uint8_t)0x4bU;
-      uu____1[1U] = (uint8_t)0x45U;
-      uu____1[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-      uu____2[0U] = (uint8_t)0U;
-      uu____2[1U] = (uint8_t)32U;
+      uu____1[0U] = 0x4bU;
+      uu____1[1U] = 0x45U;
+      uu____1[2U] = 0x4dU;
+      uint8_t *uu____2 = suite_id_kem + 3U;
+      uu____2[0U] = 0U;
+      uu____2[1U] = 32U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____3 = tmp0;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp0 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)91U;
+      uint32_t len = 91U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____4 = tmp + (uint32_t)2U;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uu____4[4U] = (uint8_t)0x2dU;
-      uu____4[5U] = (uint8_t)0x76U;
-      uu____4[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)64U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res0 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____4 = tmp + 2U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uu____4[4U] = 0x2dU;
+      uu____4[5U] = 0x76U;
+      uu____4[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 64U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res0 = 0U;
     }
     else
     {
-      res0 = (uint32_t)1U;
+      res0 = 1U;
     }
   }
   else
   {
-    res0 = (uint32_t)1U;
+    res0 = 1U;
   }
-  if (res0 == (uint32_t)0U)
+  if (res0 == 0U)
   {
     uint8_t o_context[129U] = { 0U };
     uint8_t o_secret[64U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____5 = suite_id;
-    uu____5[0U] = (uint8_t)0x48U;
-    uu____5[1U] = (uint8_t)0x50U;
-    uu____5[2U] = (uint8_t)0x4bU;
-    uu____5[3U] = (uint8_t)0x45U;
-    uint8_t *uu____6 = suite_id + (uint32_t)4U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)32U;
-    uint8_t *uu____7 = suite_id + (uint32_t)6U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
-    uint8_t *uu____8 = suite_id + (uint32_t)8U;
-    uu____8[0U] = (uint8_t)0U;
-    uu____8[1U] = (uint8_t)3U;
+    uu____5[0U] = 0x48U;
+    uu____5[1U] = 0x50U;
+    uu____5[2U] = 0x4bU;
+    uu____5[3U] = 0x45U;
+    uint8_t *uu____6 = suite_id + 4U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 32U;
+    uint8_t *uu____7 = suite_id + 6U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
+    uint8_t *uu____8 = suite_id + 8U;
+    uu____8[0U] = 0U;
+    uu____8[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[64U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp0;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[64U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp1;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+    memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____11 = tmp2;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)151U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_512(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)64U);
-    uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)64U,
-      tmp3,
-      len3,
-      (uint32_t)64U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)151U;
+    store16_be(tmp3, (uint16_t)64U);
+    uint8_t *uu____12 = tmp3 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 151U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____13 = tmp4 + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)158U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 158U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____14 = tmp + (uint32_t)2U;
-    uu____14[0U] = (uint8_t)0x48U;
-    uu____14[1U] = (uint8_t)0x50U;
-    uu____14[2U] = (uint8_t)0x4bU;
-    uu____14[3U] = (uint8_t)0x45U;
-    uu____14[4U] = (uint8_t)0x2dU;
-    uu____14[5U] = (uint8_t)0x76U;
-    uu____14[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____14 = tmp + 2U;
+    uu____14[0U] = 0x48U;
+    uu____14[1U] = 0x50U;
+    uu____14[2U] = 0x4bU;
+    uu____14[3U] = 0x45U;
+    uu____14[4U] = 0x2dU;
+    uu____14[5U] = 0x76U;
+    uu____14[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res0;
   }
   return res0;
@@ -312,272 +284,245 @@ Hacl_HPKE_Curve64_CP32_SHA512_setupBaseR(
 {
   uint8_t pkR[32U] = { 0U };
   Hacl_Curve25519_64_secret_to_public(pkR, skR);
-  uint32_t res1 = (uint32_t)0U;
+  uint32_t res1 = 0U;
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
     uint8_t *pkE = enc;
     uint8_t dh[32U] = { 0U };
     uint8_t zeros[32U] = { 0U };
     Hacl_Curve25519_64_scalarmult(dh, skR, pkE);
-    uint8_t res0 = (uint8_t)255U;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    uint8_t res0 = 255U;
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t uu____0 = FStar_UInt8_eq_mask(dh[i], zeros[i]);
-      res0 = uu____0 & res0;
+      res0 = (uint32_t)uu____0 & (uint32_t)res0;
     }
     uint8_t z = res0;
     uint32_t res;
-    if (z == (uint8_t)255U)
+    if (z == 255U)
     {
-      res = (uint32_t)1U;
+      res = 1U;
     }
     else
     {
-      res = (uint32_t)0U;
+      res = 0U;
     }
     uint32_t res11 = res;
     uint32_t res2;
     uint8_t kemcontext[64U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)32U;
+      uint8_t *pkRm = kemcontext + 32U;
       uint8_t *pkR1 = pkRm;
       Hacl_Curve25519_64_secret_to_public(pkR1, skR);
-      uint32_t res20 = (uint32_t)0U;
-      if (res20 == (uint32_t)0U)
+      uint32_t res20 = 0U;
+      if (res20 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)32U * sizeof (uint8_t));
+        memcpy(kemcontext, enc, 32U * sizeof (uint8_t));
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____1 = suite_id_kem;
-        uu____1[0U] = (uint8_t)0x4bU;
-        uu____1[1U] = (uint8_t)0x45U;
-        uu____1[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____2 = suite_id_kem + (uint32_t)3U;
-        uu____2[0U] = (uint8_t)0U;
-        uu____2[1U] = (uint8_t)32U;
+        uu____1[0U] = 0x4bU;
+        uu____1[1U] = 0x45U;
+        uu____1[2U] = 0x4dU;
+        uint8_t *uu____2 = suite_id_kem + 3U;
+        uu____2[0U] = 0U;
+        uu____2[1U] = 32U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp0, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____3 = tmp0;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp0 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp0 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp0, len0);
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp0 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp0 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp0 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp0, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)91U;
+        uint32_t len = 91U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____4 = tmp + (uint32_t)2U;
-        uu____4[0U] = (uint8_t)0x48U;
-        uu____4[1U] = (uint8_t)0x50U;
-        uu____4[2U] = (uint8_t)0x4bU;
-        uu____4[3U] = (uint8_t)0x45U;
-        uu____4[4U] = (uint8_t)0x2dU;
-        uu____4[5U] = (uint8_t)0x76U;
-        uu____4[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)64U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res2 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____4 = tmp + 2U;
+        uu____4[0U] = 0x48U;
+        uu____4[1U] = 0x50U;
+        uu____4[2U] = 0x4bU;
+        uu____4[3U] = 0x45U;
+        uu____4[4U] = 0x2dU;
+        uu____4[5U] = 0x76U;
+        uu____4[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 64U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
       uint8_t o_context[129U] = { 0U };
       uint8_t o_secret[64U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____5 = suite_id;
-      uu____5[0U] = (uint8_t)0x48U;
-      uu____5[1U] = (uint8_t)0x50U;
-      uu____5[2U] = (uint8_t)0x4bU;
-      uu____5[3U] = (uint8_t)0x45U;
-      uint8_t *uu____6 = suite_id + (uint32_t)4U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)32U;
-      uint8_t *uu____7 = suite_id + (uint32_t)6U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
-      uint8_t *uu____8 = suite_id + (uint32_t)8U;
-      uu____8[0U] = (uint8_t)0U;
-      uu____8[1U] = (uint8_t)3U;
+      uu____5[0U] = 0x48U;
+      uu____5[1U] = 0x50U;
+      uu____5[2U] = 0x4bU;
+      uu____5[3U] = 0x45U;
+      uint8_t *uu____6 = suite_id + 4U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 32U;
+      uint8_t *uu____7 = suite_id + 6U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
+      uint8_t *uu____8 = suite_id + 8U;
+      uu____8[0U] = 0U;
+      uu____8[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[64U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp0, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp0;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_psk_id_hash, empty, 0U, tmp0, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[64U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp1, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp1;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)64U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)65U, o_info_hash, (uint32_t)64U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_info_hash, empty, 0U, tmp1, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 64U * sizeof (uint8_t));
+      memcpy(o_context + 65U, o_info_hash, 64U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp2, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____11 = tmp2;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_512(o_secret, shared, (uint32_t)32U, tmp2, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)151U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_512(o_secret, shared, 32U, tmp2, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp3, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp3, (uint16_t)(uint32_t)64U);
-      uint8_t *uu____12 = tmp3 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)64U,
-        tmp3,
-        len3,
-        (uint32_t)64U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)151U;
+      store16_be(tmp3, (uint16_t)64U);
+      uint8_t *uu____12 = tmp3 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp3 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_exporter, o_secret, 64U, tmp3, len3, 64U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 151U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp4, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____13 = tmp4 + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, (uint32_t)64U, tmp4, len4, (uint32_t)32U);
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____13 = tmp4 + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_key, o_secret, 64U, tmp4, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)158U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 158U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____14 = tmp + (uint32_t)2U;
-      uu____14[0U] = (uint8_t)0x48U;
-      uu____14[1U] = (uint8_t)0x50U;
-      uu____14[2U] = (uint8_t)0x4bU;
-      uu____14[3U] = (uint8_t)0x45U;
-      uu____14[4U] = (uint8_t)0x2dU;
-      uu____14[5U] = (uint8_t)0x76U;
-      uu____14[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)129U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, (uint32_t)64U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____14 = tmp + 2U;
+      uu____14[0U] = 0x48U;
+      uu____14[1U] = 0x50U;
+      uu____14[2U] = 0x4bU;
+      uu____14[3U] = 0x45U;
+      uu____14[4U] = 0x2dU;
+      uu____14[5U] = 0x76U;
+      uu____14[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 129U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_512(o_ctx.ctx_nonce, o_secret, 64U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -596,7 +541,7 @@ Hacl_HPKE_Curve64_CP32_SHA512_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -607,43 +552,45 @@ Hacl_HPKE_Curve64_CP32_SHA512_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA512_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -661,7 +608,7 @@ Hacl_HPKE_Curve64_CP32_SHA512_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[64U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -672,42 +619,44 @@ Hacl_HPKE_Curve64_CP32_SHA512_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_Curve64_CP32_SHA512_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
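[Editor's note, not part of the patch: every HKDF call in the setupBase functions above first assembles a labeled buffer inline — for expand, a two-byte big-endian output length, then the seven bytes 0x48 0x50 0x4b 0x45 0x2d 0x76 0x31 ("HPKE-v1"), then the suite id, the label, and the context. The helper below is a hypothetical sketch of that layout, written only to make the offsets readable; the generated code never factors it out like this.]

    #include <stdint.h>
    #include <string.h>

    /* Illustrative helper: builds I2OSP(L, 2) || "HPKE-v1" || suite_id || label || context,
       the buffer the generated code hands to Hacl_HKDF_expand_sha2_256/512.
       Returns the total length written. */
    static uint32_t
    build_labeled_info(uint8_t *out, uint16_t out_len,
                       const uint8_t *suite_id, uint32_t suite_id_len,
                       const uint8_t *label, uint32_t label_len,
                       const uint8_t *ctx, uint32_t ctx_len)
    {
      uint8_t hpke_v1[7U] = { 0x48U, 0x50U, 0x4bU, 0x45U, 0x2dU, 0x76U, 0x31U }; /* "HPKE-v1" */
      out[0U] = (uint8_t)(out_len >> 8U); /* big-endian length, as store16_be does */
      out[1U] = (uint8_t)out_len;
      memcpy(out + 2U, hpke_v1, 7U * sizeof (uint8_t));
      memcpy(out + 9U, suite_id, suite_id_len * sizeof (uint8_t));
      memcpy(out + 9U + suite_id_len, label, label_len * sizeof (uint8_t));
      memcpy(out + 9U + suite_id_len + label_len, ctx, ctx_len * sizeof (uint8_t));
      return 9U + suite_id_len + label_len + ctx_len;
    }

[For example, the `label_base_nonce` step in the SHA-256 variant uses a 10-byte suite id, a 10-byte label, and a 65-byte context, so the helper would return 2 + 7 + 10 + 10 + 65 = 94 — the same `len = 94U` the hunks above allocate before calling expand with L = 12. The extract-side buffers follow the same layout without the two-byte length prefix.]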
 
diff --git a/src/msvc/Hacl_HPKE_P256_CP128_SHA256.c b/src/msvc/Hacl_HPKE_P256_CP128_SHA256.c
index 5dad7dcf..6672d593 100644
--- a/src/msvc/Hacl_HPKE_P256_CP128_SHA256.c
+++ b/src/msvc/Hacl_HPKE_P256_CP128_SHA256.c
@@ -38,267 +38,239 @@ Hacl_HPKE_P256_CP128_SHA256_setupBaseS(
 )
 {
   uint8_t o_shared[32U] = { 0U };
-  uint8_t *o_pkE1 = o_pkE + (uint32_t)1U;
+  uint8_t *o_pkE1 = o_pkE + 1U;
   bool res0 = Hacl_Impl_P256_DH_ecp256dh_i(o_pkE1, skE);
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint32_t res3;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    o_pkE[0U] = (uint8_t)4U;
+    o_pkE[0U] = 4U;
     uint8_t o_dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkR, skE);
-    memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(o_dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res2;
     if (res)
     {
-      res2 = (uint32_t)0U;
+      res2 = 0U;
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
     uint8_t o_kemcontext[130U] = { 0U };
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U;
-      uint8_t *o_pkR = o_pkRm + (uint32_t)1U;
-      memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t));
-      o_pkRm[0U] = (uint8_t)4U;
+      memcpy(o_kemcontext, o_pkE, 65U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 65U;
+      uint8_t *o_pkR = o_pkRm + 1U;
+      memcpy(o_pkR, pkR, 64U * sizeof (uint8_t));
+      o_pkRm[0U] = 4U;
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____0 = suite_id_kem;
-      uu____0[0U] = (uint8_t)0x4bU;
-      uu____0[1U] = (uint8_t)0x45U;
-      uu____0[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-      uu____1[0U] = (uint8_t)0U;
-      uu____1[1U] = (uint8_t)16U;
+      uu____0[0U] = 0x4bU;
+      uu____0[1U] = 0x45U;
+      uu____0[2U] = 0x4dU;
+      uint8_t *uu____1 = suite_id_kem + 3U;
+      uu____1[0U] = 0U;
+      uu____1[1U] = 16U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____2 = tmp1;
-      uu____2[0U] = (uint8_t)0x48U;
-      uu____2[1U] = (uint8_t)0x50U;
-      uu____2[2U] = (uint8_t)0x4bU;
-      uu____2[3U] = (uint8_t)0x45U;
-      uu____2[4U] = (uint8_t)0x2dU;
-      uu____2[5U] = (uint8_t)0x76U;
-      uu____2[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+      uu____2[0U] = 0x48U;
+      uu____2[1U] = 0x50U;
+      uu____2[2U] = 0x4bU;
+      uu____2[3U] = 0x45U;
+      uu____2[4U] = 0x2dU;
+      uu____2[5U] = 0x76U;
+      uu____2[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp1 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)157U;
+      uint32_t len = 157U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____3 = tmp + (uint32_t)2U;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)130U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res3 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____3 = tmp + 2U;
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 130U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res3 = 0U;
     }
     else
     {
-      res3 = (uint32_t)1U;
+      res3 = 1U;
     }
   }
   else
   {
-    res3 = (uint32_t)1U;
+    res3 = 1U;
   }
-  if (res3 == (uint32_t)0U)
+  if (res3 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____4 = suite_id;
-    uu____4[0U] = (uint8_t)0x48U;
-    uu____4[1U] = (uint8_t)0x50U;
-    uu____4[2U] = (uint8_t)0x4bU;
-    uu____4[3U] = (uint8_t)0x45U;
-    uint8_t *uu____5 = suite_id + (uint32_t)4U;
-    uu____5[0U] = (uint8_t)0U;
-    uu____5[1U] = (uint8_t)16U;
-    uint8_t *uu____6 = suite_id + (uint32_t)6U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)1U;
-    uint8_t *uu____7 = suite_id + (uint32_t)8U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
+    uu____4[0U] = 0x48U;
+    uu____4[1U] = 0x50U;
+    uu____4[2U] = 0x4bU;
+    uu____4[3U] = 0x45U;
+    uint8_t *uu____5 = suite_id + 4U;
+    uu____5[0U] = 0U;
+    uu____5[1U] = 16U;
+    uint8_t *uu____6 = suite_id + 6U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 1U;
+    uint8_t *uu____7 = suite_id + 8U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____8 = tmp0;
-    uu____8[0U] = (uint8_t)0x48U;
-    uu____8[1U] = (uint8_t)0x50U;
-    uu____8[2U] = (uint8_t)0x4bU;
-    uu____8[3U] = (uint8_t)0x45U;
-    uu____8[4U] = (uint8_t)0x2dU;
-    uu____8[5U] = (uint8_t)0x76U;
-    uu____8[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____8[0U] = 0x48U;
+    uu____8[1U] = 0x50U;
+    uu____8[2U] = 0x4bU;
+    uu____8[3U] = 0x45U;
+    uu____8[4U] = 0x2dU;
+    uu____8[5U] = 0x76U;
+    uu____8[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp1;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp2;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____11 = tmp3 + (uint32_t)2U;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____11 = tmp3 + 2U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp4 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____12 = tmp4 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____13 = tmp + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____13 = tmp + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res3;
   }
   return res3;
@@ -318,279 +290,252 @@ Hacl_HPKE_P256_CP128_SHA256_setupBaseR(
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    uint8_t *pkE = enc + (uint32_t)1U;
+    uint8_t *pkE = enc + 1U;
     uint8_t dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkE, skR);
-    memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res11;
     if (res)
     {
-      res11 = (uint32_t)0U;
+      res11 = 0U;
     }
     else
     {
-      res11 = (uint32_t)1U;
+      res11 = 1U;
     }
     uint32_t res20;
     uint8_t kemcontext[130U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)65U;
-      uint8_t *pkR1 = pkRm + (uint32_t)1U;
+      uint8_t *pkRm = kemcontext + 65U;
+      uint8_t *pkR1 = pkRm + 1U;
       bool res3 = Hacl_Impl_P256_DH_ecp256dh_i(pkR1, skR);
       uint32_t res2;
       if (res3)
       {
-        res2 = (uint32_t)0U;
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
-      if (res2 == (uint32_t)0U)
+      if (res2 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t));
-        pkRm[0U] = (uint8_t)4U;
+        memcpy(kemcontext, enc, 65U * sizeof (uint8_t));
+        pkRm[0U] = 4U;
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____0 = suite_id_kem;
-        uu____0[0U] = (uint8_t)0x4bU;
-        uu____0[1U] = (uint8_t)0x45U;
-        uu____0[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-        uu____1[0U] = (uint8_t)0U;
-        uu____1[1U] = (uint8_t)16U;
+        uu____0[0U] = 0x4bU;
+        uu____0[1U] = 0x45U;
+        uu____0[2U] = 0x4dU;
+        uint8_t *uu____1 = suite_id_kem + 3U;
+        uu____1[0U] = 0U;
+        uu____1[1U] = 16U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp1, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____2 = tmp1;
-        uu____2[0U] = (uint8_t)0x48U;
-        uu____2[1U] = (uint8_t)0x50U;
-        uu____2[2U] = (uint8_t)0x4bU;
-        uu____2[3U] = (uint8_t)0x45U;
-        uu____2[4U] = (uint8_t)0x2dU;
-        uu____2[5U] = (uint8_t)0x76U;
-        uu____2[6U] = (uint8_t)0x31U;
-        memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+        uu____2[0U] = 0x48U;
+        uu____2[1U] = 0x50U;
+        uu____2[2U] = 0x4bU;
+        uu____2[3U] = 0x45U;
+        uu____2[4U] = 0x2dU;
+        uu____2[5U] = 0x76U;
+        uu____2[6U] = 0x31U;
+        memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp1 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)157U;
+        uint32_t len = 157U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____3 = tmp + (uint32_t)2U;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)130U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res20 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____3 = tmp + 2U;
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 130U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res20 = 0U;
       }
       else
       {
-        res20 = (uint32_t)1U;
+        res20 = 1U;
       }
     }
     else
     {
-      res20 = (uint32_t)1U;
+      res20 = 1U;
     }
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____4 = suite_id;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uint8_t *uu____5 = suite_id + (uint32_t)4U;
-      uu____5[0U] = (uint8_t)0U;
-      uu____5[1U] = (uint8_t)16U;
-      uint8_t *uu____6 = suite_id + (uint32_t)6U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)1U;
-      uint8_t *uu____7 = suite_id + (uint32_t)8U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uint8_t *uu____5 = suite_id + 4U;
+      uu____5[0U] = 0U;
+      uu____5[1U] = 16U;
+      uint8_t *uu____6 = suite_id + 6U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 1U;
+      uint8_t *uu____7 = suite_id + 8U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____8 = tmp1;
-      uu____8[0U] = (uint8_t)0x48U;
-      uu____8[1U] = (uint8_t)0x50U;
-      uu____8[2U] = (uint8_t)0x4bU;
-      uu____8[3U] = (uint8_t)0x45U;
-      uu____8[4U] = (uint8_t)0x2dU;
-      uu____8[5U] = (uint8_t)0x76U;
-      uu____8[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp1, len0);
+      uu____8[0U] = 0x48U;
+      uu____8[1U] = 0x50U;
+      uu____8[2U] = 0x4bU;
+      uu____8[3U] = 0x45U;
+      uu____8[4U] = 0x2dU;
+      uu____8[5U] = 0x76U;
+      uu____8[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp1 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp1, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp2 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp2, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp2;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp2, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp2 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp2, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp3 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp3, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp3;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp3, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp3 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp3 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp3, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp4 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp4, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____11 = tmp4 + (uint32_t)2U;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp4,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____11 = tmp4 + 2U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp4, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp5 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp5, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp5, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp5 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp5 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp5, len4, (uint32_t)32U);
+      store16_be(tmp5, (uint16_t)32U);
+      uint8_t *uu____12 = tmp5 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp5 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp5 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp5 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp5, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____13 = tmp + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____13 = tmp + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -609,7 +554,7 @@ Hacl_HPKE_P256_CP128_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -620,43 +565,45 @@ Hacl_HPKE_P256_CP128_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP128_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_128_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -674,7 +621,7 @@ Hacl_HPKE_P256_CP128_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -685,42 +632,44 @@ Hacl_HPKE_P256_CP128_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP128_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_128_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
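The vectorized variants follow the same convention: Hacl_AEAD_Chacha20Poly1305_Simd128_encrypt / _decrypt used above take their arguments in the same order as the portable functions. As in the openBase hunk, a combined ciphertext of ctlen bytes is split into cipher = ct and tag = ct + ctlen - 16U before the call. A brief sketch, with buffer names assumed for illustration:

#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd128.h"   /* assumed header name */

/* Splits a combined ciphertext||tag buffer for the Simd128 decrypt call.
   ct/ctlen are the assumed inputs, pt an assumed output of ctlen - 16 bytes;
   callers are expected to guarantee ctlen >= 16, as the HPKE code above does. */
static uint32_t open_sketch(uint8_t *pt, uint8_t *ct, uint32_t ctlen,
                            uint8_t *aad, uint32_t aadlen,
                            uint8_t *key, uint8_t *nonce)
{
  uint8_t *cipher = ct;
  uint8_t *tag = ct + ctlen - 16U;   /* last 16 bytes carry the Poly1305 tag */
  return Hacl_AEAD_Chacha20Poly1305_Simd128_decrypt(pt, cipher, ctlen - 16U,
                                                    aad, aadlen, key, nonce, tag);
}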
diff --git a/src/msvc/Hacl_HPKE_P256_CP256_SHA256.c b/src/msvc/Hacl_HPKE_P256_CP256_SHA256.c
index 2e932f40..962abee4 100644
--- a/src/msvc/Hacl_HPKE_P256_CP256_SHA256.c
+++ b/src/msvc/Hacl_HPKE_P256_CP256_SHA256.c
@@ -38,267 +38,239 @@ Hacl_HPKE_P256_CP256_SHA256_setupBaseS(
 )
 {
   uint8_t o_shared[32U] = { 0U };
-  uint8_t *o_pkE1 = o_pkE + (uint32_t)1U;
+  uint8_t *o_pkE1 = o_pkE + 1U;
   bool res0 = Hacl_Impl_P256_DH_ecp256dh_i(o_pkE1, skE);
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint32_t res3;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    o_pkE[0U] = (uint8_t)4U;
+    o_pkE[0U] = 4U;
     uint8_t o_dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkR, skE);
-    memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(o_dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res2;
     if (res)
     {
-      res2 = (uint32_t)0U;
+      res2 = 0U;
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
     uint8_t o_kemcontext[130U] = { 0U };
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U;
-      uint8_t *o_pkR = o_pkRm + (uint32_t)1U;
-      memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t));
-      o_pkRm[0U] = (uint8_t)4U;
+      memcpy(o_kemcontext, o_pkE, 65U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 65U;
+      uint8_t *o_pkR = o_pkRm + 1U;
+      memcpy(o_pkR, pkR, 64U * sizeof (uint8_t));
+      o_pkRm[0U] = 4U;
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____0 = suite_id_kem;
-      uu____0[0U] = (uint8_t)0x4bU;
-      uu____0[1U] = (uint8_t)0x45U;
-      uu____0[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-      uu____1[0U] = (uint8_t)0U;
-      uu____1[1U] = (uint8_t)16U;
+      uu____0[0U] = 0x4bU;
+      uu____0[1U] = 0x45U;
+      uu____0[2U] = 0x4dU;
+      uint8_t *uu____1 = suite_id_kem + 3U;
+      uu____1[0U] = 0U;
+      uu____1[1U] = 16U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____2 = tmp1;
-      uu____2[0U] = (uint8_t)0x48U;
-      uu____2[1U] = (uint8_t)0x50U;
-      uu____2[2U] = (uint8_t)0x4bU;
-      uu____2[3U] = (uint8_t)0x45U;
-      uu____2[4U] = (uint8_t)0x2dU;
-      uu____2[5U] = (uint8_t)0x76U;
-      uu____2[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+      uu____2[0U] = 0x48U;
+      uu____2[1U] = 0x50U;
+      uu____2[2U] = 0x4bU;
+      uu____2[3U] = 0x45U;
+      uu____2[4U] = 0x2dU;
+      uu____2[5U] = 0x76U;
+      uu____2[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp1 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)157U;
+      uint32_t len = 157U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____3 = tmp + (uint32_t)2U;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)130U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res3 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____3 = tmp + 2U;
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 130U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res3 = 0U;
     }
     else
     {
-      res3 = (uint32_t)1U;
+      res3 = 1U;
     }
   }
   else
   {
-    res3 = (uint32_t)1U;
+    res3 = 1U;
   }
-  if (res3 == (uint32_t)0U)
+  if (res3 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____4 = suite_id;
-    uu____4[0U] = (uint8_t)0x48U;
-    uu____4[1U] = (uint8_t)0x50U;
-    uu____4[2U] = (uint8_t)0x4bU;
-    uu____4[3U] = (uint8_t)0x45U;
-    uint8_t *uu____5 = suite_id + (uint32_t)4U;
-    uu____5[0U] = (uint8_t)0U;
-    uu____5[1U] = (uint8_t)16U;
-    uint8_t *uu____6 = suite_id + (uint32_t)6U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)1U;
-    uint8_t *uu____7 = suite_id + (uint32_t)8U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
+    uu____4[0U] = 0x48U;
+    uu____4[1U] = 0x50U;
+    uu____4[2U] = 0x4bU;
+    uu____4[3U] = 0x45U;
+    uint8_t *uu____5 = suite_id + 4U;
+    uu____5[0U] = 0U;
+    uu____5[1U] = 16U;
+    uint8_t *uu____6 = suite_id + 6U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 1U;
+    uint8_t *uu____7 = suite_id + 8U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____8 = tmp0;
-    uu____8[0U] = (uint8_t)0x48U;
-    uu____8[1U] = (uint8_t)0x50U;
-    uu____8[2U] = (uint8_t)0x4bU;
-    uu____8[3U] = (uint8_t)0x45U;
-    uu____8[4U] = (uint8_t)0x2dU;
-    uu____8[5U] = (uint8_t)0x76U;
-    uu____8[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____8[0U] = 0x48U;
+    uu____8[1U] = 0x50U;
+    uu____8[2U] = 0x4bU;
+    uu____8[3U] = 0x45U;
+    uu____8[4U] = 0x2dU;
+    uu____8[5U] = 0x76U;
+    uu____8[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp1;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp2;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____11 = tmp3 + (uint32_t)2U;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____11 = tmp3 + 2U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp4 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____12 = tmp4 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____13 = tmp + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____13 = tmp + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res3;
   }
   return res3;
@@ -318,279 +290,252 @@ Hacl_HPKE_P256_CP256_SHA256_setupBaseR(
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    uint8_t *pkE = enc + (uint32_t)1U;
+    uint8_t *pkE = enc + 1U;
     uint8_t dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkE, skR);
-    memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res11;
     if (res)
     {
-      res11 = (uint32_t)0U;
+      res11 = 0U;
     }
     else
     {
-      res11 = (uint32_t)1U;
+      res11 = 1U;
     }
     uint32_t res20;
     uint8_t kemcontext[130U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)65U;
-      uint8_t *pkR1 = pkRm + (uint32_t)1U;
+      uint8_t *pkRm = kemcontext + 65U;
+      uint8_t *pkR1 = pkRm + 1U;
       bool res3 = Hacl_Impl_P256_DH_ecp256dh_i(pkR1, skR);
       uint32_t res2;
       if (res3)
       {
-        res2 = (uint32_t)0U;
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
-      if (res2 == (uint32_t)0U)
+      if (res2 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t));
-        pkRm[0U] = (uint8_t)4U;
+        memcpy(kemcontext, enc, 65U * sizeof (uint8_t));
+        pkRm[0U] = 4U;
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____0 = suite_id_kem;
-        uu____0[0U] = (uint8_t)0x4bU;
-        uu____0[1U] = (uint8_t)0x45U;
-        uu____0[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-        uu____1[0U] = (uint8_t)0U;
-        uu____1[1U] = (uint8_t)16U;
+        uu____0[0U] = 0x4bU;
+        uu____0[1U] = 0x45U;
+        uu____0[2U] = 0x4dU;
+        uint8_t *uu____1 = suite_id_kem + 3U;
+        uu____1[0U] = 0U;
+        uu____1[1U] = 16U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp1, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____2 = tmp1;
-        uu____2[0U] = (uint8_t)0x48U;
-        uu____2[1U] = (uint8_t)0x50U;
-        uu____2[2U] = (uint8_t)0x4bU;
-        uu____2[3U] = (uint8_t)0x45U;
-        uu____2[4U] = (uint8_t)0x2dU;
-        uu____2[5U] = (uint8_t)0x76U;
-        uu____2[6U] = (uint8_t)0x31U;
-        memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+        uu____2[0U] = 0x48U;
+        uu____2[1U] = 0x50U;
+        uu____2[2U] = 0x4bU;
+        uu____2[3U] = 0x45U;
+        uu____2[4U] = 0x2dU;
+        uu____2[5U] = 0x76U;
+        uu____2[6U] = 0x31U;
+        memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp1 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)157U;
+        uint32_t len = 157U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____3 = tmp + (uint32_t)2U;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)130U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res20 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____3 = tmp + 2U;
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 130U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res20 = 0U;
       }
       else
       {
-        res20 = (uint32_t)1U;
+        res20 = 1U;
       }
     }
     else
     {
-      res20 = (uint32_t)1U;
+      res20 = 1U;
     }
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____4 = suite_id;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uint8_t *uu____5 = suite_id + (uint32_t)4U;
-      uu____5[0U] = (uint8_t)0U;
-      uu____5[1U] = (uint8_t)16U;
-      uint8_t *uu____6 = suite_id + (uint32_t)6U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)1U;
-      uint8_t *uu____7 = suite_id + (uint32_t)8U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uint8_t *uu____5 = suite_id + 4U;
+      uu____5[0U] = 0U;
+      uu____5[1U] = 16U;
+      uint8_t *uu____6 = suite_id + 6U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 1U;
+      uint8_t *uu____7 = suite_id + 8U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____8 = tmp1;
-      uu____8[0U] = (uint8_t)0x48U;
-      uu____8[1U] = (uint8_t)0x50U;
-      uu____8[2U] = (uint8_t)0x4bU;
-      uu____8[3U] = (uint8_t)0x45U;
-      uu____8[4U] = (uint8_t)0x2dU;
-      uu____8[5U] = (uint8_t)0x76U;
-      uu____8[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp1, len0);
+      uu____8[0U] = 0x48U;
+      uu____8[1U] = 0x50U;
+      uu____8[2U] = 0x4bU;
+      uu____8[3U] = 0x45U;
+      uu____8[4U] = 0x2dU;
+      uu____8[5U] = 0x76U;
+      uu____8[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp1 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp1, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp2 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp2, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp2;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp2, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp2 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp2, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp3 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp3, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp3;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp3, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp3 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp3 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp3, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp4 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp4, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____11 = tmp4 + (uint32_t)2U;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp4,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____11 = tmp4 + 2U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp4, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp5 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp5, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp5, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp5 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp5 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp5, len4, (uint32_t)32U);
+      store16_be(tmp5, (uint16_t)32U);
+      uint8_t *uu____12 = tmp5 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp5 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp5 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp5 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp5, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____13 = tmp + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____13 = tmp + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -609,7 +554,7 @@ Hacl_HPKE_P256_CP256_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -620,43 +565,45 @@ Hacl_HPKE_P256_CP256_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP256_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_256_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -674,7 +621,7 @@ Hacl_HPKE_P256_CP256_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -685,42 +632,44 @@ Hacl_HPKE_P256_CP256_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP256_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_256_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
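The hunks above migrate the sealBase/openBase paths from the previous Chacha20Poly1305_256 AEAD calls to the Hacl_AEAD_Chacha20Poly1305_Simd256 entry points, with the ciphertext and tag now written to two separate output arguments placed first. The following is a minimal round-trip sketch whose argument order is inferred from the call sites in this patch (ciphertext, tag, plaintext, plaintext length, AAD, AAD length, 32-byte key, 12-byte nonce); the header name and the wrapper function are illustrative assumptions, not part of the patch.

/* Sketch only: header name assumed from the function prefix. */
#include <stdint.h>
#include <string.h>
#include "Hacl_AEAD_Chacha20Poly1305_Simd256.h"

/* Hypothetical helper: one seal/open round trip with the SIMD-256 AEAD. */
static int example_simd256_roundtrip(void)
{
  uint8_t key[32U] = { 0U };     /* all-zero key, for illustration only */
  uint8_t nonce[12U] = { 0U };   /* 96-bit nonce */
  uint8_t aad[4U] = { 1U, 2U, 3U, 4U };
  uint8_t plain[8U] = "message";
  uint8_t cipher[8U] = { 0U };
  uint8_t tag[16U] = { 0U };
  uint8_t decrypted[8U] = { 0U };
  /* Ciphertext and tag go to separate output buffers. */
  Hacl_AEAD_Chacha20Poly1305_Simd256_encrypt(cipher, tag, plain, 8U, aad, 4U, key, nonce);
  /* Decrypt returns 0U when the tag verifies, as checked by the HPKE code above. */
  uint32_t res =
    Hacl_AEAD_Chacha20Poly1305_Simd256_decrypt(decrypted, cipher, 8U, aad, 4U, key, nonce, tag);
  return (res == 0U && memcmp(decrypted, plain, 8U) == 0) ? 0 : 1;
}
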
diff --git a/src/msvc/Hacl_HPKE_P256_CP32_SHA256.c b/src/msvc/Hacl_HPKE_P256_CP32_SHA256.c
index 34dc3403..0869fe45 100644
--- a/src/msvc/Hacl_HPKE_P256_CP32_SHA256.c
+++ b/src/msvc/Hacl_HPKE_P256_CP32_SHA256.c
@@ -38,267 +38,239 @@ Hacl_HPKE_P256_CP32_SHA256_setupBaseS(
 )
 {
   uint8_t o_shared[32U] = { 0U };
-  uint8_t *o_pkE1 = o_pkE + (uint32_t)1U;
+  uint8_t *o_pkE1 = o_pkE + 1U;
   bool res0 = Hacl_Impl_P256_DH_ecp256dh_i(o_pkE1, skE);
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint32_t res3;
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    o_pkE[0U] = (uint8_t)4U;
+    o_pkE[0U] = 4U;
     uint8_t o_dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkR, skE);
-    memcpy(o_dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(o_dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res2;
     if (res)
     {
-      res2 = (uint32_t)0U;
+      res2 = 0U;
     }
     else
     {
-      res2 = (uint32_t)1U;
+      res2 = 1U;
     }
     uint8_t o_kemcontext[130U] = { 0U };
-    if (res2 == (uint32_t)0U)
+    if (res2 == 0U)
     {
-      memcpy(o_kemcontext, o_pkE, (uint32_t)65U * sizeof (uint8_t));
-      uint8_t *o_pkRm = o_kemcontext + (uint32_t)65U;
-      uint8_t *o_pkR = o_pkRm + (uint32_t)1U;
-      memcpy(o_pkR, pkR, (uint32_t)64U * sizeof (uint8_t));
-      o_pkRm[0U] = (uint8_t)4U;
+      memcpy(o_kemcontext, o_pkE, 65U * sizeof (uint8_t));
+      uint8_t *o_pkRm = o_kemcontext + 65U;
+      uint8_t *o_pkR = o_pkRm + 1U;
+      memcpy(o_pkR, pkR, 64U * sizeof (uint8_t));
+      o_pkRm[0U] = 4U;
       uint8_t *o_dhm = o_dh;
       uint8_t o_eae_prk[32U] = { 0U };
       uint8_t suite_id_kem[5U] = { 0U };
       uint8_t *uu____0 = suite_id_kem;
-      uu____0[0U] = (uint8_t)0x4bU;
-      uu____0[1U] = (uint8_t)0x45U;
-      uu____0[2U] = (uint8_t)0x4dU;
-      uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-      uu____1[0U] = (uint8_t)0U;
-      uu____1[1U] = (uint8_t)16U;
+      uu____0[0U] = 0x4bU;
+      uu____0[1U] = 0x45U;
+      uu____0[2U] = 0x4dU;
+      uint8_t *uu____1 = suite_id_kem + 3U;
+      uu____1[0U] = 0U;
+      uu____1[1U] = 16U;
       uint8_t *empty = suite_id_kem;
-      uint8_t
-      label_eae_prk[7U] =
-        {
-          (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-          (uint8_t)0x72U, (uint8_t)0x6bU
-        };
-      uint32_t len0 = (uint32_t)51U;
+      uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+      uint32_t len0 = 51U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____2 = tmp1;
-      uu____2[0U] = (uint8_t)0x48U;
-      uu____2[1U] = (uint8_t)0x50U;
-      uu____2[2U] = (uint8_t)0x4bU;
-      uu____2[3U] = (uint8_t)0x45U;
-      uu____2[4U] = (uint8_t)0x2dU;
-      uu____2[5U] = (uint8_t)0x76U;
-      uu____2[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)19U, o_dhm, (uint32_t)32U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+      uu____2[0U] = 0x48U;
+      uu____2[1U] = 0x50U;
+      uu____2[2U] = 0x4bU;
+      uu____2[3U] = 0x45U;
+      uu____2[4U] = 0x2dU;
+      uu____2[5U] = 0x76U;
+      uu____2[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+      memcpy(tmp1 + 19U, o_dhm, 32U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
       uint8_t
       label_shared_secret[13U] =
         {
-          (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-          (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+          0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U
         };
-      uint32_t len = (uint32_t)157U;
+      uint32_t len = 157U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____3 = tmp + (uint32_t)2U;
-      uu____3[0U] = (uint8_t)0x48U;
-      uu____3[1U] = (uint8_t)0x50U;
-      uu____3[2U] = (uint8_t)0x4bU;
-      uu____3[3U] = (uint8_t)0x45U;
-      uu____3[4U] = (uint8_t)0x2dU;
-      uu____3[5U] = (uint8_t)0x76U;
-      uu____3[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)27U, o_kemcontext, (uint32_t)130U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-      res3 = (uint32_t)0U;
+      store16_be(tmp, (uint16_t)32U);
+      uint8_t *uu____3 = tmp + 2U;
+      uu____3[0U] = 0x48U;
+      uu____3[1U] = 0x50U;
+      uu____3[2U] = 0x4bU;
+      uu____3[3U] = 0x45U;
+      uu____3[4U] = 0x2dU;
+      uu____3[5U] = 0x76U;
+      uu____3[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+      memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+      memcpy(tmp + 27U, o_kemcontext, 130U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_shared, o_eae_prk, 32U, tmp, len, 32U);
+      res3 = 0U;
     }
     else
     {
-      res3 = (uint32_t)1U;
+      res3 = 1U;
     }
   }
   else
   {
-    res3 = (uint32_t)1U;
+    res3 = 1U;
   }
-  if (res3 == (uint32_t)0U)
+  if (res3 == 0U)
   {
     uint8_t o_context[65U] = { 0U };
     uint8_t o_secret[32U] = { 0U };
     uint8_t suite_id[10U] = { 0U };
     uint8_t *uu____4 = suite_id;
-    uu____4[0U] = (uint8_t)0x48U;
-    uu____4[1U] = (uint8_t)0x50U;
-    uu____4[2U] = (uint8_t)0x4bU;
-    uu____4[3U] = (uint8_t)0x45U;
-    uint8_t *uu____5 = suite_id + (uint32_t)4U;
-    uu____5[0U] = (uint8_t)0U;
-    uu____5[1U] = (uint8_t)16U;
-    uint8_t *uu____6 = suite_id + (uint32_t)6U;
-    uu____6[0U] = (uint8_t)0U;
-    uu____6[1U] = (uint8_t)1U;
-    uint8_t *uu____7 = suite_id + (uint32_t)8U;
-    uu____7[0U] = (uint8_t)0U;
-    uu____7[1U] = (uint8_t)3U;
+    uu____4[0U] = 0x48U;
+    uu____4[1U] = 0x50U;
+    uu____4[2U] = 0x4bU;
+    uu____4[3U] = 0x45U;
+    uint8_t *uu____5 = suite_id + 4U;
+    uu____5[0U] = 0U;
+    uu____5[1U] = 16U;
+    uint8_t *uu____6 = suite_id + 6U;
+    uu____6[0U] = 0U;
+    uu____6[1U] = 1U;
+    uint8_t *uu____7 = suite_id + 8U;
+    uu____7[0U] = 0U;
+    uu____7[1U] = 3U;
     uint8_t
     label_psk_id_hash[11U] =
-      {
-        (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-        (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-        (uint8_t)0x68U
-      };
+      { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_psk_id_hash[32U] = { 0U };
     uint8_t *empty = suite_id;
-    uint32_t len0 = (uint32_t)28U;
+    uint32_t len0 = 28U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len0);
     uint8_t *tmp0 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
     memset(tmp0, 0U, len0 * sizeof (uint8_t));
     uint8_t *uu____8 = tmp0;
-    uu____8[0U] = (uint8_t)0x48U;
-    uu____8[1U] = (uint8_t)0x50U;
-    uu____8[2U] = (uint8_t)0x4bU;
-    uu____8[3U] = (uint8_t)0x45U;
-    uu____8[4U] = (uint8_t)0x2dU;
-    uu____8[5U] = (uint8_t)0x76U;
-    uu____8[6U] = (uint8_t)0x31U;
-    memcpy(tmp0 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-    memcpy(tmp0 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp0, len0);
+    uu____8[0U] = 0x48U;
+    uu____8[1U] = 0x50U;
+    uu____8[2U] = 0x4bU;
+    uu____8[3U] = 0x45U;
+    uu____8[4U] = 0x2dU;
+    uu____8[5U] = 0x76U;
+    uu____8[6U] = 0x31U;
+    memcpy(tmp0 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp0 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+    memcpy(tmp0 + 28U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp0, len0);
     uint8_t
-    label_info_hash[9U] =
-      {
-        (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-        (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-      };
+    label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
     uint8_t o_info_hash[32U] = { 0U };
-    uint32_t len1 = (uint32_t)26U + infolen;
+    uint32_t len1 = 26U + infolen;
     KRML_CHECK_SIZE(sizeof (uint8_t), len1);
     uint8_t *tmp1 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
     memset(tmp1, 0U, len1 * sizeof (uint8_t));
     uint8_t *uu____9 = tmp1;
-    uu____9[0U] = (uint8_t)0x48U;
-    uu____9[1U] = (uint8_t)0x50U;
-    uu____9[2U] = (uint8_t)0x4bU;
-    uu____9[3U] = (uint8_t)0x45U;
-    uu____9[4U] = (uint8_t)0x2dU;
-    uu____9[5U] = (uint8_t)0x76U;
-    uu____9[6U] = (uint8_t)0x31U;
-    memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-    memcpy(tmp1 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp1, len1);
-    o_context[0U] = (uint8_t)0U;
-    memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-    memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-    uint8_t
-    label_secret[6U] =
-      {
-        (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-        (uint8_t)0x74U
-      };
-    uint32_t len2 = (uint32_t)23U;
+    uu____9[0U] = 0x48U;
+    uu____9[1U] = 0x50U;
+    uu____9[2U] = 0x4bU;
+    uu____9[3U] = 0x45U;
+    uu____9[4U] = 0x2dU;
+    uu____9[5U] = 0x76U;
+    uu____9[6U] = 0x31U;
+    memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp1 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+    memcpy(tmp1 + 26U, info, infolen * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp1, len1);
+    o_context[0U] = 0U;
+    memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+    memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+    uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+    uint32_t len2 = 23U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len2);
     uint8_t *tmp2 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
     memset(tmp2, 0U, len2 * sizeof (uint8_t));
     uint8_t *uu____10 = tmp2;
-    uu____10[0U] = (uint8_t)0x48U;
-    uu____10[1U] = (uint8_t)0x50U;
-    uu____10[2U] = (uint8_t)0x4bU;
-    uu____10[3U] = (uint8_t)0x45U;
-    uu____10[4U] = (uint8_t)0x2dU;
-    uu____10[5U] = (uint8_t)0x76U;
-    uu____10[6U] = (uint8_t)0x31U;
-    memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-    memcpy(tmp2 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, (uint32_t)32U, tmp2, len2);
-    uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-    uint32_t len3 = (uint32_t)87U;
+    uu____10[0U] = 0x48U;
+    uu____10[1U] = 0x50U;
+    uu____10[2U] = 0x4bU;
+    uu____10[3U] = 0x45U;
+    uu____10[4U] = 0x2dU;
+    uu____10[5U] = 0x76U;
+    uu____10[6U] = 0x31U;
+    memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp2 + 17U, label_secret, 6U * sizeof (uint8_t));
+    memcpy(tmp2 + 23U, empty, 0U * sizeof (uint8_t));
+    Hacl_HKDF_extract_sha2_256(o_secret, o_shared, 32U, tmp2, len2);
+    uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+    uint32_t len3 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len3);
     uint8_t *tmp3 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
     memset(tmp3, 0U, len3 * sizeof (uint8_t));
-    store16_be(tmp3, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____11 = tmp3 + (uint32_t)2U;
-    uu____11[0U] = (uint8_t)0x48U;
-    uu____11[1U] = (uint8_t)0x50U;
-    uu____11[2U] = (uint8_t)0x4bU;
-    uu____11[3U] = (uint8_t)0x45U;
-    uu____11[4U] = (uint8_t)0x2dU;
-    uu____11[5U] = (uint8_t)0x76U;
-    uu____11[6U] = (uint8_t)0x31U;
-    memcpy(tmp3 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp3 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-      o_secret,
-      (uint32_t)32U,
-      tmp3,
-      len3,
-      (uint32_t)32U);
-    uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-    uint32_t len4 = (uint32_t)87U;
+    store16_be(tmp3, (uint16_t)32U);
+    uint8_t *uu____11 = tmp3 + 2U;
+    uu____11[0U] = 0x48U;
+    uu____11[1U] = 0x50U;
+    uu____11[2U] = 0x4bU;
+    uu____11[3U] = 0x45U;
+    uu____11[4U] = 0x2dU;
+    uu____11[5U] = 0x76U;
+    uu____11[6U] = 0x31U;
+    memcpy(tmp3 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp3 + 19U, label_exp, 3U * sizeof (uint8_t));
+    memcpy(tmp3 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp3, len3, 32U);
+    uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+    uint32_t len4 = 87U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len4);
     uint8_t *tmp4 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
     memset(tmp4, 0U, len4 * sizeof (uint8_t));
-    store16_be(tmp4, (uint16_t)(uint32_t)32U);
-    uint8_t *uu____12 = tmp4 + (uint32_t)2U;
-    uu____12[0U] = (uint8_t)0x48U;
-    uu____12[1U] = (uint8_t)0x50U;
-    uu____12[2U] = (uint8_t)0x4bU;
-    uu____12[3U] = (uint8_t)0x45U;
-    uu____12[4U] = (uint8_t)0x2dU;
-    uu____12[5U] = (uint8_t)0x76U;
-    uu____12[6U] = (uint8_t)0x31U;
-    memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-    memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp4, len4, (uint32_t)32U);
+    store16_be(tmp4, (uint16_t)32U);
+    uint8_t *uu____12 = tmp4 + 2U;
+    uu____12[0U] = 0x48U;
+    uu____12[1U] = 0x50U;
+    uu____12[2U] = 0x4bU;
+    uu____12[3U] = 0x45U;
+    uu____12[4U] = 0x2dU;
+    uu____12[5U] = 0x76U;
+    uu____12[6U] = 0x31U;
+    memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp4 + 19U, label_key, 3U * sizeof (uint8_t));
+    memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp4, len4, 32U);
     uint8_t
     label_base_nonce[10U] =
-      {
-        (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-        (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-      };
-    uint32_t len = (uint32_t)94U;
+      { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+    uint32_t len = 94U;
     KRML_CHECK_SIZE(sizeof (uint8_t), len);
     uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
     memset(tmp, 0U, len * sizeof (uint8_t));
-    store16_be(tmp, (uint16_t)(uint32_t)12U);
-    uint8_t *uu____13 = tmp + (uint32_t)2U;
-    uu____13[0U] = (uint8_t)0x48U;
-    uu____13[1U] = (uint8_t)0x50U;
-    uu____13[2U] = (uint8_t)0x4bU;
-    uu____13[3U] = (uint8_t)0x45U;
-    uu____13[4U] = (uint8_t)0x2dU;
-    uu____13[5U] = (uint8_t)0x76U;
-    uu____13[6U] = (uint8_t)0x31U;
-    memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-    memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-    o_ctx.ctx_seq[0U] = (uint64_t)0U;
+    store16_be(tmp, (uint16_t)12U);
+    uint8_t *uu____13 = tmp + 2U;
+    uu____13[0U] = 0x48U;
+    uu____13[1U] = 0x50U;
+    uu____13[2U] = 0x4bU;
+    uu____13[3U] = 0x45U;
+    uu____13[4U] = 0x2dU;
+    uu____13[5U] = 0x76U;
+    uu____13[6U] = 0x31U;
+    memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+    memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+    memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+    Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+    o_ctx.ctx_seq[0U] = 0ULL;
     return res3;
   }
   return res3;
@@ -318,279 +290,252 @@ Hacl_HPKE_P256_CP32_SHA256_setupBaseR(
   uint32_t res1;
   if (res0)
   {
-    res1 = (uint32_t)0U;
+    res1 = 0U;
   }
   else
   {
-    res1 = (uint32_t)1U;
+    res1 = 1U;
   }
   uint8_t shared[32U] = { 0U };
-  if (res1 == (uint32_t)0U)
+  if (res1 == 0U)
   {
-    uint8_t *pkE = enc + (uint32_t)1U;
+    uint8_t *pkE = enc + 1U;
     uint8_t dh[64U] = { 0U };
     uint8_t tmp0[64U] = { 0U };
     bool res = Hacl_Impl_P256_DH_ecp256dh_r(tmp0, pkE, skR);
-    memcpy(dh, tmp0, (uint32_t)64U * sizeof (uint8_t));
+    memcpy(dh, tmp0, 64U * sizeof (uint8_t));
     uint32_t res11;
     if (res)
     {
-      res11 = (uint32_t)0U;
+      res11 = 0U;
     }
     else
     {
-      res11 = (uint32_t)1U;
+      res11 = 1U;
     }
     uint32_t res20;
     uint8_t kemcontext[130U] = { 0U };
-    if (res11 == (uint32_t)0U)
+    if (res11 == 0U)
     {
-      uint8_t *pkRm = kemcontext + (uint32_t)65U;
-      uint8_t *pkR1 = pkRm + (uint32_t)1U;
+      uint8_t *pkRm = kemcontext + 65U;
+      uint8_t *pkR1 = pkRm + 1U;
       bool res3 = Hacl_Impl_P256_DH_ecp256dh_i(pkR1, skR);
       uint32_t res2;
       if (res3)
       {
-        res2 = (uint32_t)0U;
+        res2 = 0U;
       }
       else
       {
-        res2 = (uint32_t)1U;
+        res2 = 1U;
       }
-      if (res2 == (uint32_t)0U)
+      if (res2 == 0U)
       {
-        memcpy(kemcontext, enc, (uint32_t)65U * sizeof (uint8_t));
-        pkRm[0U] = (uint8_t)4U;
+        memcpy(kemcontext, enc, 65U * sizeof (uint8_t));
+        pkRm[0U] = 4U;
         uint8_t *dhm = dh;
         uint8_t o_eae_prk[32U] = { 0U };
         uint8_t suite_id_kem[5U] = { 0U };
         uint8_t *uu____0 = suite_id_kem;
-        uu____0[0U] = (uint8_t)0x4bU;
-        uu____0[1U] = (uint8_t)0x45U;
-        uu____0[2U] = (uint8_t)0x4dU;
-        uint8_t *uu____1 = suite_id_kem + (uint32_t)3U;
-        uu____1[0U] = (uint8_t)0U;
-        uu____1[1U] = (uint8_t)16U;
+        uu____0[0U] = 0x4bU;
+        uu____0[1U] = 0x45U;
+        uu____0[2U] = 0x4dU;
+        uint8_t *uu____1 = suite_id_kem + 3U;
+        uu____1[0U] = 0U;
+        uu____1[1U] = 16U;
         uint8_t *empty = suite_id_kem;
-        uint8_t
-        label_eae_prk[7U] =
-          {
-            (uint8_t)0x65U, (uint8_t)0x61U, (uint8_t)0x65U, (uint8_t)0x5fU, (uint8_t)0x70U,
-            (uint8_t)0x72U, (uint8_t)0x6bU
-          };
-        uint32_t len0 = (uint32_t)51U;
+        uint8_t label_eae_prk[7U] = { 0x65U, 0x61U, 0x65U, 0x5fU, 0x70U, 0x72U, 0x6bU };
+        uint32_t len0 = 51U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len0);
         uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
         memset(tmp1, 0U, len0 * sizeof (uint8_t));
         uint8_t *uu____2 = tmp1;
-        uu____2[0U] = (uint8_t)0x48U;
-        uu____2[1U] = (uint8_t)0x50U;
-        uu____2[2U] = (uint8_t)0x4bU;
-        uu____2[3U] = (uint8_t)0x45U;
-        uu____2[4U] = (uint8_t)0x2dU;
-        uu____2[5U] = (uint8_t)0x76U;
-        uu____2[6U] = (uint8_t)0x31U;
-        memcpy(tmp1 + (uint32_t)7U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)12U, label_eae_prk, (uint32_t)7U * sizeof (uint8_t));
-        memcpy(tmp1 + (uint32_t)19U, dhm, (uint32_t)32U * sizeof (uint8_t));
-        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, (uint32_t)0U, tmp1, len0);
+        uu____2[0U] = 0x48U;
+        uu____2[1U] = 0x50U;
+        uu____2[2U] = 0x4bU;
+        uu____2[3U] = 0x45U;
+        uu____2[4U] = 0x2dU;
+        uu____2[5U] = 0x76U;
+        uu____2[6U] = 0x31U;
+        memcpy(tmp1 + 7U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp1 + 12U, label_eae_prk, 7U * sizeof (uint8_t));
+        memcpy(tmp1 + 19U, dhm, 32U * sizeof (uint8_t));
+        Hacl_HKDF_extract_sha2_256(o_eae_prk, empty, 0U, tmp1, len0);
         uint8_t
         label_shared_secret[13U] =
           {
-            (uint8_t)0x73U, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x72U, (uint8_t)0x65U,
-            (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U,
-            (uint8_t)0x72U, (uint8_t)0x65U, (uint8_t)0x74U
+            0x73U, 0x68U, 0x61U, 0x72U, 0x65U, 0x64U, 0x5fU, 0x73U, 0x65U, 0x63U, 0x72U, 0x65U,
+            0x74U
           };
-        uint32_t len = (uint32_t)157U;
+        uint32_t len = 157U;
         KRML_CHECK_SIZE(sizeof (uint8_t), len);
         uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
         memset(tmp, 0U, len * sizeof (uint8_t));
-        store16_be(tmp, (uint16_t)(uint32_t)32U);
-        uint8_t *uu____3 = tmp + (uint32_t)2U;
-        uu____3[0U] = (uint8_t)0x48U;
-        uu____3[1U] = (uint8_t)0x50U;
-        uu____3[2U] = (uint8_t)0x4bU;
-        uu____3[3U] = (uint8_t)0x45U;
-        uu____3[4U] = (uint8_t)0x2dU;
-        uu____3[5U] = (uint8_t)0x76U;
-        uu____3[6U] = (uint8_t)0x31U;
-        memcpy(tmp + (uint32_t)9U, suite_id_kem, (uint32_t)5U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)14U, label_shared_secret, (uint32_t)13U * sizeof (uint8_t));
-        memcpy(tmp + (uint32_t)27U, kemcontext, (uint32_t)130U * sizeof (uint8_t));
-        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, (uint32_t)32U, tmp, len, (uint32_t)32U);
-        res20 = (uint32_t)0U;
+        store16_be(tmp, (uint16_t)32U);
+        uint8_t *uu____3 = tmp + 2U;
+        uu____3[0U] = 0x48U;
+        uu____3[1U] = 0x50U;
+        uu____3[2U] = 0x4bU;
+        uu____3[3U] = 0x45U;
+        uu____3[4U] = 0x2dU;
+        uu____3[5U] = 0x76U;
+        uu____3[6U] = 0x31U;
+        memcpy(tmp + 9U, suite_id_kem, 5U * sizeof (uint8_t));
+        memcpy(tmp + 14U, label_shared_secret, 13U * sizeof (uint8_t));
+        memcpy(tmp + 27U, kemcontext, 130U * sizeof (uint8_t));
+        Hacl_HKDF_expand_sha2_256(shared, o_eae_prk, 32U, tmp, len, 32U);
+        res20 = 0U;
       }
       else
       {
-        res20 = (uint32_t)1U;
+        res20 = 1U;
       }
     }
     else
     {
-      res20 = (uint32_t)1U;
+      res20 = 1U;
     }
-    if (res20 == (uint32_t)0U)
+    if (res20 == 0U)
     {
       uint8_t o_context[65U] = { 0U };
       uint8_t o_secret[32U] = { 0U };
       uint8_t suite_id[10U] = { 0U };
       uint8_t *uu____4 = suite_id;
-      uu____4[0U] = (uint8_t)0x48U;
-      uu____4[1U] = (uint8_t)0x50U;
-      uu____4[2U] = (uint8_t)0x4bU;
-      uu____4[3U] = (uint8_t)0x45U;
-      uint8_t *uu____5 = suite_id + (uint32_t)4U;
-      uu____5[0U] = (uint8_t)0U;
-      uu____5[1U] = (uint8_t)16U;
-      uint8_t *uu____6 = suite_id + (uint32_t)6U;
-      uu____6[0U] = (uint8_t)0U;
-      uu____6[1U] = (uint8_t)1U;
-      uint8_t *uu____7 = suite_id + (uint32_t)8U;
-      uu____7[0U] = (uint8_t)0U;
-      uu____7[1U] = (uint8_t)3U;
+      uu____4[0U] = 0x48U;
+      uu____4[1U] = 0x50U;
+      uu____4[2U] = 0x4bU;
+      uu____4[3U] = 0x45U;
+      uint8_t *uu____5 = suite_id + 4U;
+      uu____5[0U] = 0U;
+      uu____5[1U] = 16U;
+      uint8_t *uu____6 = suite_id + 6U;
+      uu____6[0U] = 0U;
+      uu____6[1U] = 1U;
+      uint8_t *uu____7 = suite_id + 8U;
+      uu____7[0U] = 0U;
+      uu____7[1U] = 3U;
       uint8_t
       label_psk_id_hash[11U] =
-        {
-          (uint8_t)0x70U, (uint8_t)0x73U, (uint8_t)0x6bU, (uint8_t)0x5fU, (uint8_t)0x69U,
-          (uint8_t)0x64U, (uint8_t)0x5fU, (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U,
-          (uint8_t)0x68U
-        };
+        { 0x70U, 0x73U, 0x6bU, 0x5fU, 0x69U, 0x64U, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_psk_id_hash[32U] = { 0U };
       uint8_t *empty = suite_id;
-      uint32_t len0 = (uint32_t)28U;
+      uint32_t len0 = 28U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len0);
       uint8_t *tmp1 = (uint8_t *)alloca(len0 * sizeof (uint8_t));
       memset(tmp1, 0U, len0 * sizeof (uint8_t));
       uint8_t *uu____8 = tmp1;
-      uu____8[0U] = (uint8_t)0x48U;
-      uu____8[1U] = (uint8_t)0x50U;
-      uu____8[2U] = (uint8_t)0x4bU;
-      uu____8[3U] = (uint8_t)0x45U;
-      uu____8[4U] = (uint8_t)0x2dU;
-      uu____8[5U] = (uint8_t)0x76U;
-      uu____8[6U] = (uint8_t)0x31U;
-      memcpy(tmp1 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)17U, label_psk_id_hash, (uint32_t)11U * sizeof (uint8_t));
-      memcpy(tmp1 + (uint32_t)28U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, (uint32_t)0U, tmp1, len0);
+      uu____8[0U] = 0x48U;
+      uu____8[1U] = 0x50U;
+      uu____8[2U] = 0x4bU;
+      uu____8[3U] = 0x45U;
+      uu____8[4U] = 0x2dU;
+      uu____8[5U] = 0x76U;
+      uu____8[6U] = 0x31U;
+      memcpy(tmp1 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp1 + 17U, label_psk_id_hash, 11U * sizeof (uint8_t));
+      memcpy(tmp1 + 28U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_psk_id_hash, empty, 0U, tmp1, len0);
       uint8_t
-      label_info_hash[9U] =
-        {
-          (uint8_t)0x69U, (uint8_t)0x6eU, (uint8_t)0x66U, (uint8_t)0x6fU, (uint8_t)0x5fU,
-          (uint8_t)0x68U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x68U
-        };
+      label_info_hash[9U] = { 0x69U, 0x6eU, 0x66U, 0x6fU, 0x5fU, 0x68U, 0x61U, 0x73U, 0x68U };
       uint8_t o_info_hash[32U] = { 0U };
-      uint32_t len1 = (uint32_t)26U + infolen;
+      uint32_t len1 = 26U + infolen;
       KRML_CHECK_SIZE(sizeof (uint8_t), len1);
       uint8_t *tmp2 = (uint8_t *)alloca(len1 * sizeof (uint8_t));
       memset(tmp2, 0U, len1 * sizeof (uint8_t));
       uint8_t *uu____9 = tmp2;
-      uu____9[0U] = (uint8_t)0x48U;
-      uu____9[1U] = (uint8_t)0x50U;
-      uu____9[2U] = (uint8_t)0x4bU;
-      uu____9[3U] = (uint8_t)0x45U;
-      uu____9[4U] = (uint8_t)0x2dU;
-      uu____9[5U] = (uint8_t)0x76U;
-      uu____9[6U] = (uint8_t)0x31U;
-      memcpy(tmp2 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)17U, label_info_hash, (uint32_t)9U * sizeof (uint8_t));
-      memcpy(tmp2 + (uint32_t)26U, info, infolen * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, (uint32_t)0U, tmp2, len1);
-      o_context[0U] = (uint8_t)0U;
-      memcpy(o_context + (uint32_t)1U, o_psk_id_hash, (uint32_t)32U * sizeof (uint8_t));
-      memcpy(o_context + (uint32_t)33U, o_info_hash, (uint32_t)32U * sizeof (uint8_t));
-      uint8_t
-      label_secret[6U] =
-        {
-          (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x63U, (uint8_t)0x72U, (uint8_t)0x65U,
-          (uint8_t)0x74U
-        };
-      uint32_t len2 = (uint32_t)23U;
+      uu____9[0U] = 0x48U;
+      uu____9[1U] = 0x50U;
+      uu____9[2U] = 0x4bU;
+      uu____9[3U] = 0x45U;
+      uu____9[4U] = 0x2dU;
+      uu____9[5U] = 0x76U;
+      uu____9[6U] = 0x31U;
+      memcpy(tmp2 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp2 + 17U, label_info_hash, 9U * sizeof (uint8_t));
+      memcpy(tmp2 + 26U, info, infolen * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_info_hash, empty, 0U, tmp2, len1);
+      o_context[0U] = 0U;
+      memcpy(o_context + 1U, o_psk_id_hash, 32U * sizeof (uint8_t));
+      memcpy(o_context + 33U, o_info_hash, 32U * sizeof (uint8_t));
+      uint8_t label_secret[6U] = { 0x73U, 0x65U, 0x63U, 0x72U, 0x65U, 0x74U };
+      uint32_t len2 = 23U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len2);
       uint8_t *tmp3 = (uint8_t *)alloca(len2 * sizeof (uint8_t));
       memset(tmp3, 0U, len2 * sizeof (uint8_t));
       uint8_t *uu____10 = tmp3;
-      uu____10[0U] = (uint8_t)0x48U;
-      uu____10[1U] = (uint8_t)0x50U;
-      uu____10[2U] = (uint8_t)0x4bU;
-      uu____10[3U] = (uint8_t)0x45U;
-      uu____10[4U] = (uint8_t)0x2dU;
-      uu____10[5U] = (uint8_t)0x76U;
-      uu____10[6U] = (uint8_t)0x31U;
-      memcpy(tmp3 + (uint32_t)7U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)17U, label_secret, (uint32_t)6U * sizeof (uint8_t));
-      memcpy(tmp3 + (uint32_t)23U, empty, (uint32_t)0U * sizeof (uint8_t));
-      Hacl_HKDF_extract_sha2_256(o_secret, shared, (uint32_t)32U, tmp3, len2);
-      uint8_t label_exp[3U] = { (uint8_t)0x65U, (uint8_t)0x78U, (uint8_t)0x70U };
-      uint32_t len3 = (uint32_t)87U;
+      uu____10[0U] = 0x48U;
+      uu____10[1U] = 0x50U;
+      uu____10[2U] = 0x4bU;
+      uu____10[3U] = 0x45U;
+      uu____10[4U] = 0x2dU;
+      uu____10[5U] = 0x76U;
+      uu____10[6U] = 0x31U;
+      memcpy(tmp3 + 7U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp3 + 17U, label_secret, 6U * sizeof (uint8_t));
+      memcpy(tmp3 + 23U, empty, 0U * sizeof (uint8_t));
+      Hacl_HKDF_extract_sha2_256(o_secret, shared, 32U, tmp3, len2);
+      uint8_t label_exp[3U] = { 0x65U, 0x78U, 0x70U };
+      uint32_t len3 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len3);
       uint8_t *tmp4 = (uint8_t *)alloca(len3 * sizeof (uint8_t));
       memset(tmp4, 0U, len3 * sizeof (uint8_t));
-      store16_be(tmp4, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____11 = tmp4 + (uint32_t)2U;
-      uu____11[0U] = (uint8_t)0x48U;
-      uu____11[1U] = (uint8_t)0x50U;
-      uu____11[2U] = (uint8_t)0x4bU;
-      uu____11[3U] = (uint8_t)0x45U;
-      uu____11[4U] = (uint8_t)0x2dU;
-      uu____11[5U] = (uint8_t)0x76U;
-      uu____11[6U] = (uint8_t)0x31U;
-      memcpy(tmp4 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)19U, label_exp, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp4 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter,
-        o_secret,
-        (uint32_t)32U,
-        tmp4,
-        len3,
-        (uint32_t)32U);
-      uint8_t label_key[3U] = { (uint8_t)0x6bU, (uint8_t)0x65U, (uint8_t)0x79U };
-      uint32_t len4 = (uint32_t)87U;
+      store16_be(tmp4, (uint16_t)32U);
+      uint8_t *uu____11 = tmp4 + 2U;
+      uu____11[0U] = 0x48U;
+      uu____11[1U] = 0x50U;
+      uu____11[2U] = 0x4bU;
+      uu____11[3U] = 0x45U;
+      uu____11[4U] = 0x2dU;
+      uu____11[5U] = 0x76U;
+      uu____11[6U] = 0x31U;
+      memcpy(tmp4 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp4 + 19U, label_exp, 3U * sizeof (uint8_t));
+      memcpy(tmp4 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_exporter, o_secret, 32U, tmp4, len3, 32U);
+      uint8_t label_key[3U] = { 0x6bU, 0x65U, 0x79U };
+      uint32_t len4 = 87U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len4);
       uint8_t *tmp5 = (uint8_t *)alloca(len4 * sizeof (uint8_t));
       memset(tmp5, 0U, len4 * sizeof (uint8_t));
-      store16_be(tmp5, (uint16_t)(uint32_t)32U);
-      uint8_t *uu____12 = tmp5 + (uint32_t)2U;
-      uu____12[0U] = (uint8_t)0x48U;
-      uu____12[1U] = (uint8_t)0x50U;
-      uu____12[2U] = (uint8_t)0x4bU;
-      uu____12[3U] = (uint8_t)0x45U;
-      uu____12[4U] = (uint8_t)0x2dU;
-      uu____12[5U] = (uint8_t)0x76U;
-      uu____12[6U] = (uint8_t)0x31U;
-      memcpy(tmp5 + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)19U, label_key, (uint32_t)3U * sizeof (uint8_t));
-      memcpy(tmp5 + (uint32_t)22U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, (uint32_t)32U, tmp5, len4, (uint32_t)32U);
+      store16_be(tmp5, (uint16_t)32U);
+      uint8_t *uu____12 = tmp5 + 2U;
+      uu____12[0U] = 0x48U;
+      uu____12[1U] = 0x50U;
+      uu____12[2U] = 0x4bU;
+      uu____12[3U] = 0x45U;
+      uu____12[4U] = 0x2dU;
+      uu____12[5U] = 0x76U;
+      uu____12[6U] = 0x31U;
+      memcpy(tmp5 + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp5 + 19U, label_key, 3U * sizeof (uint8_t));
+      memcpy(tmp5 + 22U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_key, o_secret, 32U, tmp5, len4, 32U);
       uint8_t
       label_base_nonce[10U] =
-        {
-          (uint8_t)0x62U, (uint8_t)0x61U, (uint8_t)0x73U, (uint8_t)0x65U, (uint8_t)0x5fU,
-          (uint8_t)0x6eU, (uint8_t)0x6fU, (uint8_t)0x6eU, (uint8_t)0x63U, (uint8_t)0x65U
-        };
-      uint32_t len = (uint32_t)94U;
+        { 0x62U, 0x61U, 0x73U, 0x65U, 0x5fU, 0x6eU, 0x6fU, 0x6eU, 0x63U, 0x65U };
+      uint32_t len = 94U;
       KRML_CHECK_SIZE(sizeof (uint8_t), len);
       uint8_t *tmp = (uint8_t *)alloca(len * sizeof (uint8_t));
       memset(tmp, 0U, len * sizeof (uint8_t));
-      store16_be(tmp, (uint16_t)(uint32_t)12U);
-      uint8_t *uu____13 = tmp + (uint32_t)2U;
-      uu____13[0U] = (uint8_t)0x48U;
-      uu____13[1U] = (uint8_t)0x50U;
-      uu____13[2U] = (uint8_t)0x4bU;
-      uu____13[3U] = (uint8_t)0x45U;
-      uu____13[4U] = (uint8_t)0x2dU;
-      uu____13[5U] = (uint8_t)0x76U;
-      uu____13[6U] = (uint8_t)0x31U;
-      memcpy(tmp + (uint32_t)9U, suite_id, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)19U, label_base_nonce, (uint32_t)10U * sizeof (uint8_t));
-      memcpy(tmp + (uint32_t)29U, o_context, (uint32_t)65U * sizeof (uint8_t));
-      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, (uint32_t)32U, tmp, len, (uint32_t)12U);
-      o_ctx.ctx_seq[0U] = (uint64_t)0U;
-      return (uint32_t)0U;
+      store16_be(tmp, (uint16_t)12U);
+      uint8_t *uu____13 = tmp + 2U;
+      uu____13[0U] = 0x48U;
+      uu____13[1U] = 0x50U;
+      uu____13[2U] = 0x4bU;
+      uu____13[3U] = 0x45U;
+      uu____13[4U] = 0x2dU;
+      uu____13[5U] = 0x76U;
+      uu____13[6U] = 0x31U;
+      memcpy(tmp + 9U, suite_id, 10U * sizeof (uint8_t));
+      memcpy(tmp + 19U, label_base_nonce, 10U * sizeof (uint8_t));
+      memcpy(tmp + 29U, o_context, 65U * sizeof (uint8_t));
+      Hacl_HKDF_expand_sha2_256(o_ctx.ctx_nonce, o_secret, 32U, tmp, len, 12U);
+      o_ctx.ctx_seq[0U] = 0ULL;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -609,7 +554,7 @@ Hacl_HPKE_P256_CP32_SHA256_sealBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -620,43 +565,45 @@ Hacl_HPKE_P256_CP32_SHA256_sealBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP32_SHA256_setupBaseS(o_enc, o_ctx, skE, pkR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
-    Hacl_Chacha20Poly1305_32_aead_encrypt(o_ctx.ctx_key,
-      nonce,
-      aadlen,
-      aad,
-      plainlen,
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = o_ct;
+    uint8_t *tag = o_ct + plainlen;
+    Hacl_AEAD_Chacha20Poly1305_encrypt(cipher,
+      tag,
       plain,
-      o_ct,
-      o_ct + plainlen);
+      plainlen,
+      aad,
+      aadlen,
+      o_ctx.ctx_key,
+      nonce);
     uint64_t s1 = o_ctx.ctx_seq[0U];
     uint32_t res1;
-    if (s1 == (uint64_t)18446744073709551615U)
+    if (s1 == 18446744073709551615ULL)
     {
-      res1 = (uint32_t)1U;
+      res1 = 1U;
     }
     else
     {
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      res1 = (uint32_t)0U;
+      res1 = 0U;
     }
     uint32_t res10 = res1;
     return res10;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
 
 uint32_t
@@ -674,7 +621,7 @@ Hacl_HPKE_P256_CP32_SHA256_openBase(
 {
   uint8_t ctx_key[32U] = { 0U };
   uint8_t ctx_nonce[12U] = { 0U };
-  uint64_t ctx_seq = (uint64_t)0U;
+  uint64_t ctx_seq = 0ULL;
   uint8_t ctx_exporter[32U] = { 0U };
   Hacl_Impl_HPKE_context_s
   o_ctx =
@@ -685,42 +632,44 @@ Hacl_HPKE_P256_CP32_SHA256_openBase(
       .ctx_exporter = ctx_exporter
     };
   uint32_t res = Hacl_HPKE_P256_CP32_SHA256_setupBaseR(o_ctx, pkE, skR, infolen, info);
-  if (res == (uint32_t)0U)
+  if (res == 0U)
   {
     uint8_t nonce[12U] = { 0U };
     uint64_t s = o_ctx.ctx_seq[0U];
     uint8_t enc[12U] = { 0U };
-    store64_be(enc + (uint32_t)4U, s);
+    store64_be(enc + 4U, s);
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint8_t xi = enc[i];
       uint8_t yi = o_ctx.ctx_nonce[i];
-      nonce[i] = xi ^ yi;);
+      nonce[i] = (uint32_t)xi ^ (uint32_t)yi;);
+    uint8_t *cipher = ct;
+    uint8_t *tag = ct + ctlen - 16U;
     uint32_t
     res1 =
-      Hacl_Chacha20Poly1305_32_aead_decrypt(o_ctx.ctx_key,
-        nonce,
-        aadlen,
+      Hacl_AEAD_Chacha20Poly1305_decrypt(o_pt,
+        cipher,
+        ctlen - 16U,
         aad,
-        ctlen - (uint32_t)16U,
-        o_pt,
-        ct,
-        ct + ctlen - (uint32_t)16U);
-    if (res1 == (uint32_t)0U)
+        aadlen,
+        o_ctx.ctx_key,
+        nonce,
+        tag);
+    if (res1 == 0U)
     {
       uint64_t s1 = o_ctx.ctx_seq[0U];
-      if (s1 == (uint64_t)18446744073709551615U)
+      if (s1 == 18446744073709551615ULL)
       {
-        return (uint32_t)1U;
+        return 1U;
       }
-      uint64_t s_ = s1 + (uint64_t)1U;
+      uint64_t s_ = s1 + 1ULL;
       o_ctx.ctx_seq[0U] = s_;
-      return (uint32_t)0U;
+      return 0U;
     }
-    return (uint32_t)1U;
+    return 1U;
   }
-  return (uint32_t)1U;
+  return 1U;
 }
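For reference, the AEAD migration applied throughout the HPKE hunks above boils down to a reordered call shape: the new unified functions take the outputs first (ciphertext, then tag for encryption; plaintext for decryption), then the input and its length, then the AAD, and finally the key, nonce and, for decryption, the tag. A minimal caller-side sketch follows, with the prototypes and the header name inferred from the call sites in this patch rather than taken from the installed headers:

#include <stdint.h>
#include "Hacl_AEAD_Chacha20Poly1305.h"   /* assumed header name for the new unified AEAD API */

/* Old (removed):  Hacl_Chacha20Poly1305_32_aead_encrypt(key, nonce, aadlen, aad,
 *                                                       plainlen, plain, cipher, tag);
 * New (added):    Hacl_AEAD_Chacha20Poly1305_encrypt(cipher, tag, plain, plainlen,
 *                                                    aad, aadlen, key, nonce);           */
static uint32_t
seal_then_open(uint8_t *key, uint8_t *nonce,
               uint8_t *aad, uint32_t aadlen,
               uint8_t *plain, uint32_t plainlen,
               uint8_t *ct /* must hold plainlen + 16 bytes */)
{
  uint8_t *cipher = ct;
  uint8_t *tag = ct + plainlen;                      /* tag is appended after the ciphertext */
  Hacl_AEAD_Chacha20Poly1305_encrypt(cipher, tag, plain, plainlen,
                                     aad, aadlen, key, nonce);
  /* Decryption mirrors the same argument order and returns 0U on success. */
  return Hacl_AEAD_Chacha20Poly1305_decrypt(plain, cipher, plainlen,
                                            aad, aadlen, key, nonce, tag);
}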
 
diff --git a/src/msvc/Hacl_Hash_Base.c b/src/msvc/Hacl_Hash_Base.c
index 40796f14..02d893e3 100644
--- a/src/msvc/Hacl_Hash_Base.c
+++ b/src/msvc/Hacl_Hash_Base.c
@@ -31,27 +31,27 @@ uint32_t Hacl_Hash_Definitions_word_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     default:
       {
@@ -67,59 +67,59 @@ uint32_t Hacl_Hash_Definitions_block_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)144U;
+        return 144U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)104U;
+        return 104U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)72U;
+        return 72U;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        return (uint32_t)168U;
+        return 168U;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)128U;
+        return 128U;
       }
     default:
       {
@@ -135,27 +135,27 @@ uint32_t Hacl_Hash_Definitions_hash_word_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)4U;
+        return 4U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)5U;
+        return 5U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)7U;
+        return 7U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)6U;
+        return 6U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)8U;
+        return 8U;
       }
     default:
       {
@@ -171,51 +171,51 @@ uint32_t Hacl_Hash_Definitions_hash_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)20U;
+        return 20U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     default:
       {
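The Hacl_Hash_Base.c hunks above only strip redundant (uint32_t) casts from the agile size dispatchers; the returned values are unchanged. Callers still size buffers through them, along the lines of this sketch (function and enum names as they appear in the hunks, return values as listed above, header name assumed):

#include <stdint.h>
#include "Hacl_Hash_Base.h"   /* assumed header declaring the dispatchers and Spec_Hash_Definitions */

/* 32U for a SHA2-256 digest and 64U for its block, per the switches above. */
static uint32_t sha2_256_sizes(uint32_t *block_len_out)
{
  *block_len_out = Hacl_Hash_Definitions_block_len(Spec_Hash_Definitions_SHA2_256);  /* 64U */
  return Hacl_Hash_Definitions_hash_len(Spec_Hash_Definitions_SHA2_256);             /* 32U */
}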
diff --git a/src/msvc/Hacl_Hash_Blake2.c b/src/msvc/Hacl_Hash_Blake2.c
deleted file mode 100644
index aecc6165..00000000
--- a/src/msvc/Hacl_Hash_Blake2.c
+++ /dev/null
@@ -1,1324 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "internal/Hacl_Hash_Blake2.h"
-
-#include "internal/Hacl_Impl_Blake2_Constants.h"
-#include "lib_memzero0.h"
-
-static void
-blake2b_update_block(
-  uint64_t *wv,
-  uint64_t *hash,
-  bool flag,
-  FStar_UInt128_uint128 totlen,
-  uint8_t *d
-)
-{
-  uint64_t m_w[16U] = { 0U };
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)8U;
-    uint64_t u = load64_le(bj);
-    uint64_t r = u;
-    uint64_t x = r;
-    os[i] = x;);
-  uint64_t mask[4U] = { 0U };
-  uint64_t wv_14;
-  if (flag)
-  {
-    wv_14 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  }
-  else
-  {
-    wv_14 = (uint64_t)0U;
-  }
-  uint64_t wv_15 = (uint64_t)0U;
-  mask[0U] = FStar_UInt128_uint128_to_uint64(totlen);
-  mask[1U] = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, (uint32_t)64U));
-  mask[2U] = wv_14;
-  mask[3U] = wv_15;
-  memcpy(wv, hash, (uint32_t)16U * sizeof (uint64_t));
-  uint64_t *wv3 = wv + (uint32_t)12U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = wv3;
-    uint64_t x = wv3[i] ^ mask[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR12(i0,
-    (uint32_t)0U,
-    (uint32_t)12U,
-    (uint32_t)1U,
-    uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U;
-    uint64_t m_st[16U] = { 0U };
-    uint64_t *r0 = m_st;
-    uint64_t *r1 = m_st + (uint32_t)4U;
-    uint64_t *r20 = m_st + (uint32_t)8U;
-    uint64_t *r30 = m_st + (uint32_t)12U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
-    uint64_t uu____0 = m_w[s2];
-    uint64_t uu____1 = m_w[s4];
-    uint64_t uu____2 = m_w[s6];
-    r0[0U] = m_w[s0];
-    r0[1U] = uu____0;
-    r0[2U] = uu____1;
-    r0[3U] = uu____2;
-    uint64_t uu____3 = m_w[s3];
-    uint64_t uu____4 = m_w[s5];
-    uint64_t uu____5 = m_w[s7];
-    r1[0U] = m_w[s1];
-    r1[1U] = uu____3;
-    r1[2U] = uu____4;
-    r1[3U] = uu____5;
-    uint64_t uu____6 = m_w[s10];
-    uint64_t uu____7 = m_w[s12];
-    uint64_t uu____8 = m_w[s14];
-    r20[0U] = m_w[s8];
-    r20[1U] = uu____6;
-    r20[2U] = uu____7;
-    r20[3U] = uu____8;
-    uint64_t uu____9 = m_w[s11];
-    uint64_t uu____10 = m_w[s13];
-    uint64_t uu____11 = m_w[s15];
-    r30[0U] = m_w[s9];
-    r30[1U] = uu____9;
-    r30[2U] = uu____10;
-    r30[3U] = uu____11;
-    uint64_t *x = m_st;
-    uint64_t *y = m_st + (uint32_t)4U;
-    uint64_t *z = m_st + (uint32_t)8U;
-    uint64_t *w = m_st + (uint32_t)12U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    uint64_t *wv_a0 = wv + a * (uint32_t)4U;
-    uint64_t *wv_b0 = wv + b0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a0;
-      uint64_t x1 = wv_a0[i] + wv_b0[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a0;
-      uint64_t x1 = wv_a0[i] + x[i];
-      os[i] = x1;);
-    uint64_t *wv_a1 = wv + d10 * (uint32_t)4U;
-    uint64_t *wv_b1 = wv + a * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a1;
-      uint64_t x1 = wv_a1[i] ^ wv_b1[i];
-      os[i] = x1;);
-    uint64_t *r10 = wv_a1;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r10;
-      uint64_t x1 = r10[i];
-      uint64_t x10 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U;
-      os[i] = x10;);
-    uint64_t *wv_a2 = wv + c0 * (uint32_t)4U;
-    uint64_t *wv_b2 = wv + d10 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a2;
-      uint64_t x1 = wv_a2[i] + wv_b2[i];
-      os[i] = x1;);
-    uint64_t *wv_a3 = wv + b0 * (uint32_t)4U;
-    uint64_t *wv_b3 = wv + c0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a3;
-      uint64_t x1 = wv_a3[i] ^ wv_b3[i];
-      os[i] = x1;);
-    uint64_t *r12 = wv_a3;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r12;
-      uint64_t x1 = r12[i];
-      uint64_t x10 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U;
-      os[i] = x10;);
-    uint64_t *wv_a4 = wv + a * (uint32_t)4U;
-    uint64_t *wv_b4 = wv + b0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a4;
-      uint64_t x1 = wv_a4[i] + wv_b4[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a4;
-      uint64_t x1 = wv_a4[i] + y[i];
-      os[i] = x1;);
-    uint64_t *wv_a5 = wv + d10 * (uint32_t)4U;
-    uint64_t *wv_b5 = wv + a * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a5;
-      uint64_t x1 = wv_a5[i] ^ wv_b5[i];
-      os[i] = x1;);
-    uint64_t *r13 = wv_a5;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r13;
-      uint64_t x1 = r13[i];
-      uint64_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U;
-      os[i] = x10;);
-    uint64_t *wv_a6 = wv + c0 * (uint32_t)4U;
-    uint64_t *wv_b6 = wv + d10 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a6;
-      uint64_t x1 = wv_a6[i] + wv_b6[i];
-      os[i] = x1;);
-    uint64_t *wv_a7 = wv + b0 * (uint32_t)4U;
-    uint64_t *wv_b7 = wv + c0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a7;
-      uint64_t x1 = wv_a7[i] ^ wv_b7[i];
-      os[i] = x1;);
-    uint64_t *r14 = wv_a7;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r14;
-      uint64_t x1 = r14[i];
-      uint64_t x10 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U;
-      os[i] = x10;);
-    uint64_t *r15 = wv + (uint32_t)4U;
-    uint64_t *r21 = wv + (uint32_t)8U;
-    uint64_t *r31 = wv + (uint32_t)12U;
-    uint64_t *r110 = r15;
-    uint64_t x00 = r110[1U];
-    uint64_t x10 = r110[2U];
-    uint64_t x20 = r110[3U];
-    uint64_t x30 = r110[0U];
-    r110[0U] = x00;
-    r110[1U] = x10;
-    r110[2U] = x20;
-    r110[3U] = x30;
-    uint64_t *r111 = r21;
-    uint64_t x01 = r111[2U];
-    uint64_t x11 = r111[3U];
-    uint64_t x21 = r111[0U];
-    uint64_t x31 = r111[1U];
-    r111[0U] = x01;
-    r111[1U] = x11;
-    r111[2U] = x21;
-    r111[3U] = x31;
-    uint64_t *r112 = r31;
-    uint64_t x02 = r112[3U];
-    uint64_t x12 = r112[0U];
-    uint64_t x22 = r112[1U];
-    uint64_t x32 = r112[2U];
-    r112[0U] = x02;
-    r112[1U] = x12;
-    r112[2U] = x22;
-    r112[3U] = x32;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    uint64_t *wv_a = wv + a0 * (uint32_t)4U;
-    uint64_t *wv_b8 = wv + b * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a;
-      uint64_t x1 = wv_a[i] + wv_b8[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a;
-      uint64_t x1 = wv_a[i] + z[i];
-      os[i] = x1;);
-    uint64_t *wv_a8 = wv + d1 * (uint32_t)4U;
-    uint64_t *wv_b9 = wv + a0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a8;
-      uint64_t x1 = wv_a8[i] ^ wv_b9[i];
-      os[i] = x1;);
-    uint64_t *r16 = wv_a8;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r16;
-      uint64_t x1 = r16[i];
-      uint64_t x13 = x1 >> (uint32_t)32U | x1 << (uint32_t)32U;
-      os[i] = x13;);
-    uint64_t *wv_a9 = wv + c * (uint32_t)4U;
-    uint64_t *wv_b10 = wv + d1 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a9;
-      uint64_t x1 = wv_a9[i] + wv_b10[i];
-      os[i] = x1;);
-    uint64_t *wv_a10 = wv + b * (uint32_t)4U;
-    uint64_t *wv_b11 = wv + c * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a10;
-      uint64_t x1 = wv_a10[i] ^ wv_b11[i];
-      os[i] = x1;);
-    uint64_t *r17 = wv_a10;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r17;
-      uint64_t x1 = r17[i];
-      uint64_t x13 = x1 >> (uint32_t)24U | x1 << (uint32_t)40U;
-      os[i] = x13;);
-    uint64_t *wv_a11 = wv + a0 * (uint32_t)4U;
-    uint64_t *wv_b12 = wv + b * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a11;
-      uint64_t x1 = wv_a11[i] + wv_b12[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a11;
-      uint64_t x1 = wv_a11[i] + w[i];
-      os[i] = x1;);
-    uint64_t *wv_a12 = wv + d1 * (uint32_t)4U;
-    uint64_t *wv_b13 = wv + a0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a12;
-      uint64_t x1 = wv_a12[i] ^ wv_b13[i];
-      os[i] = x1;);
-    uint64_t *r18 = wv_a12;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r18;
-      uint64_t x1 = r18[i];
-      uint64_t x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)48U;
-      os[i] = x13;);
-    uint64_t *wv_a13 = wv + c * (uint32_t)4U;
-    uint64_t *wv_b14 = wv + d1 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a13;
-      uint64_t x1 = wv_a13[i] + wv_b14[i];
-      os[i] = x1;);
-    uint64_t *wv_a14 = wv + b * (uint32_t)4U;
-    uint64_t *wv_b = wv + c * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = wv_a14;
-      uint64_t x1 = wv_a14[i] ^ wv_b[i];
-      os[i] = x1;);
-    uint64_t *r19 = wv_a14;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint64_t *os = r19;
-      uint64_t x1 = r19[i];
-      uint64_t x13 = x1 >> (uint32_t)63U | x1 << (uint32_t)1U;
-      os[i] = x13;);
-    uint64_t *r113 = wv + (uint32_t)4U;
-    uint64_t *r2 = wv + (uint32_t)8U;
-    uint64_t *r3 = wv + (uint32_t)12U;
-    uint64_t *r11 = r113;
-    uint64_t x03 = r11[3U];
-    uint64_t x13 = r11[0U];
-    uint64_t x23 = r11[1U];
-    uint64_t x33 = r11[2U];
-    r11[0U] = x03;
-    r11[1U] = x13;
-    r11[2U] = x23;
-    r11[3U] = x33;
-    uint64_t *r114 = r2;
-    uint64_t x04 = r114[2U];
-    uint64_t x14 = r114[3U];
-    uint64_t x24 = r114[0U];
-    uint64_t x34 = r114[1U];
-    r114[0U] = x04;
-    r114[1U] = x14;
-    r114[2U] = x24;
-    r114[3U] = x34;
-    uint64_t *r115 = r3;
-    uint64_t x0 = r115[1U];
-    uint64_t x1 = r115[2U];
-    uint64_t x2 = r115[3U];
-    uint64_t x3 = r115[0U];
-    r115[0U] = x0;
-    r115[1U] = x1;
-    r115[2U] = x2;
-    r115[3U] = x3;);
-  uint64_t *s0 = hash;
-  uint64_t *s1 = hash + (uint32_t)4U;
-  uint64_t *r0 = wv;
-  uint64_t *r1 = wv + (uint32_t)4U;
-  uint64_t *r2 = wv + (uint32_t)8U;
-  uint64_t *r3 = wv + (uint32_t)12U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = s0;
-    uint64_t x = s0[i] ^ r0[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = s0;
-    uint64_t x = s0[i] ^ r2[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = s1;
-    uint64_t x = s1[i] ^ r1[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = s1;
-    uint64_t x = s1[i] ^ r3[i];
-    os[i] = x;);
-}
-
-void Hacl_Blake2b_32_blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn)
-{
-  uint64_t *r0 = hash;
-  uint64_t *r1 = hash + (uint32_t)4U;
-  uint64_t *r2 = hash + (uint32_t)8U;
-  uint64_t *r3 = hash + (uint32_t)12U;
-  uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U];
-  uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U];
-  uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U];
-  uint64_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_B[3U];
-  uint64_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_B[4U];
-  uint64_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_B[5U];
-  uint64_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_B[6U];
-  uint64_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_B[7U];
-  r2[0U] = iv0;
-  r2[1U] = iv1;
-  r2[2U] = iv2;
-  r2[3U] = iv3;
-  r3[0U] = iv4;
-  r3[1U] = iv5;
-  r3[2U] = iv6;
-  r3[3U] = iv7;
-  uint64_t kk_shift_8 = (uint64_t)kk << (uint32_t)8U;
-  uint64_t iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)nn));
-  r0[0U] = iv0_;
-  r0[1U] = iv1;
-  r0[2U] = iv2;
-  r0[3U] = iv3;
-  r1[0U] = iv4;
-  r1[1U] = iv5;
-  r1[2U] = iv6;
-  r1[3U] = iv7;
-}
-
-void
-Hacl_Blake2b_32_blake2b_update_key(
-  uint64_t *wv,
-  uint64_t *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-)
-{
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
-  uint8_t b[128U] = { 0U };
-  memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
-  {
-    blake2b_update_block(wv, hash, true, lb, b);
-  }
-  else
-  {
-    blake2b_update_block(wv, hash, false, lb, b);
-  }
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
-}
-
-void
-Hacl_Blake2b_32_blake2b_update_multi(
-  uint32_t len,
-  uint64_t *wv,
-  uint64_t *hash,
-  FStar_UInt128_uint128 prev,
-  uint8_t *blocks,
-  uint32_t nb
-)
-{
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
-  {
-    FStar_UInt128_uint128
-    totlen =
-      FStar_UInt128_add_mod(prev,
-        FStar_UInt128_uint64_to_uint128((uint64_t)((i + (uint32_t)1U) * (uint32_t)128U)));
-    uint8_t *b = blocks + i * (uint32_t)128U;
-    blake2b_update_block(wv, hash, false, totlen, b);
-  }
-}
-
-void
-Hacl_Blake2b_32_blake2b_update_last(
-  uint32_t len,
-  uint64_t *wv,
-  uint64_t *hash,
-  FStar_UInt128_uint128 prev,
-  uint32_t rem,
-  uint8_t *d
-)
-{
-  uint8_t b[128U] = { 0U };
-  uint8_t *last = d + len - rem;
-  memcpy(b, last, rem * sizeof (uint8_t));
-  FStar_UInt128_uint128
-  totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len));
-  blake2b_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
-}
-
-static void
-blake2b_update_blocks(
-  uint32_t len,
-  uint64_t *wv,
-  uint64_t *hash,
-  FStar_UInt128_uint128 prev,
-  uint8_t *blocks
-)
-{
-  uint32_t nb0 = len / (uint32_t)128U;
-  uint32_t rem0 = len % (uint32_t)128U;
-  K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
-  {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)128U;
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
-  }
-  else
-  {
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb0, .snd = rem0 });
-  }
-  uint32_t nb = scrut.fst;
-  uint32_t rem = scrut.snd;
-  Hacl_Blake2b_32_blake2b_update_multi(len, wv, hash, prev, blocks, nb);
-  Hacl_Blake2b_32_blake2b_update_last(len, wv, hash, prev, rem, blocks);
-}
-
-static inline void
-blake2b_update(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d)
-{
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
-  if (kk > (uint32_t)0U)
-  {
-    Hacl_Blake2b_32_blake2b_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
-    {
-      blake2b_update_blocks(ll, wv, hash, lb, d);
-      return;
-    }
-    return;
-  }
-  blake2b_update_blocks(ll,
-    wv,
-    hash,
-    FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)0U),
-    d);
-}
-
-void Hacl_Blake2b_32_blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash)
-{
-  uint8_t b[64U] = { 0U };
-  uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)32U;
-  uint64_t *row0 = hash;
-  uint64_t *row1 = hash + (uint32_t)4U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(first + i * (uint32_t)8U, row0[i]););
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_le(second + i * (uint32_t)8U, row1[i]););
-  uint8_t *final = b;
-  memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
-}
-
-/**
-Write the BLAKE2b digest of message `d` using key `k` into `output`.
-
-@param nn Length of the to-be-generated digest with 1 <= `nn` <= 64.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2b_32_blake2b(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-)
-{
-  uint64_t b[16U] = { 0U };
-  uint64_t b1[16U] = { 0U };
-  Hacl_Blake2b_32_blake2b_init(b, kk, nn);
-  blake2b_update(b1, b, kk, k, ll, d);
-  Hacl_Blake2b_32_blake2b_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)16U, uint64_t);
-  Lib_Memzero0_memzero(b, (uint32_t)16U, uint64_t);
-}
-
-uint64_t *Hacl_Blake2b_32_blake2b_malloc(void)
-{
-  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
-  return buf;
-}
-
-static inline void
-blake2s_update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t *d)
-{
-  uint32_t m_w[16U] = { 0U };
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint32_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)4U;
-    uint32_t u = load32_le(bj);
-    uint32_t r = u;
-    uint32_t x = r;
-    os[i] = x;);
-  uint32_t mask[4U] = { 0U };
-  uint32_t wv_14;
-  if (flag)
-  {
-    wv_14 = (uint32_t)0xFFFFFFFFU;
-  }
-  else
-  {
-    wv_14 = (uint32_t)0U;
-  }
-  uint32_t wv_15 = (uint32_t)0U;
-  mask[0U] = (uint32_t)totlen;
-  mask[1U] = (uint32_t)(totlen >> (uint32_t)32U);
-  mask[2U] = wv_14;
-  mask[3U] = wv_15;
-  memcpy(wv, hash, (uint32_t)16U * sizeof (uint32_t));
-  uint32_t *wv3 = wv + (uint32_t)12U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = wv3;
-    uint32_t x = wv3[i] ^ mask[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR10(i0,
-    (uint32_t)0U,
-    (uint32_t)10U,
-    (uint32_t)1U,
-    uint32_t start_idx = i0 % (uint32_t)10U * (uint32_t)16U;
-    uint32_t m_st[16U] = { 0U };
-    uint32_t *r0 = m_st;
-    uint32_t *r1 = m_st + (uint32_t)4U;
-    uint32_t *r20 = m_st + (uint32_t)8U;
-    uint32_t *r30 = m_st + (uint32_t)12U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
-    uint32_t uu____0 = m_w[s2];
-    uint32_t uu____1 = m_w[s4];
-    uint32_t uu____2 = m_w[s6];
-    r0[0U] = m_w[s0];
-    r0[1U] = uu____0;
-    r0[2U] = uu____1;
-    r0[3U] = uu____2;
-    uint32_t uu____3 = m_w[s3];
-    uint32_t uu____4 = m_w[s5];
-    uint32_t uu____5 = m_w[s7];
-    r1[0U] = m_w[s1];
-    r1[1U] = uu____3;
-    r1[2U] = uu____4;
-    r1[3U] = uu____5;
-    uint32_t uu____6 = m_w[s10];
-    uint32_t uu____7 = m_w[s12];
-    uint32_t uu____8 = m_w[s14];
-    r20[0U] = m_w[s8];
-    r20[1U] = uu____6;
-    r20[2U] = uu____7;
-    r20[3U] = uu____8;
-    uint32_t uu____9 = m_w[s11];
-    uint32_t uu____10 = m_w[s13];
-    uint32_t uu____11 = m_w[s15];
-    r30[0U] = m_w[s9];
-    r30[1U] = uu____9;
-    r30[2U] = uu____10;
-    r30[3U] = uu____11;
-    uint32_t *x = m_st;
-    uint32_t *y = m_st + (uint32_t)4U;
-    uint32_t *z = m_st + (uint32_t)8U;
-    uint32_t *w = m_st + (uint32_t)12U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    uint32_t *wv_a0 = wv + a * (uint32_t)4U;
-    uint32_t *wv_b0 = wv + b0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a0;
-      uint32_t x1 = wv_a0[i] + wv_b0[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a0;
-      uint32_t x1 = wv_a0[i] + x[i];
-      os[i] = x1;);
-    uint32_t *wv_a1 = wv + d10 * (uint32_t)4U;
-    uint32_t *wv_b1 = wv + a * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a1;
-      uint32_t x1 = wv_a1[i] ^ wv_b1[i];
-      os[i] = x1;);
-    uint32_t *r10 = wv_a1;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r10;
-      uint32_t x1 = r10[i];
-      uint32_t x10 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U;
-      os[i] = x10;);
-    uint32_t *wv_a2 = wv + c0 * (uint32_t)4U;
-    uint32_t *wv_b2 = wv + d10 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a2;
-      uint32_t x1 = wv_a2[i] + wv_b2[i];
-      os[i] = x1;);
-    uint32_t *wv_a3 = wv + b0 * (uint32_t)4U;
-    uint32_t *wv_b3 = wv + c0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a3;
-      uint32_t x1 = wv_a3[i] ^ wv_b3[i];
-      os[i] = x1;);
-    uint32_t *r12 = wv_a3;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r12;
-      uint32_t x1 = r12[i];
-      uint32_t x10 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U;
-      os[i] = x10;);
-    uint32_t *wv_a4 = wv + a * (uint32_t)4U;
-    uint32_t *wv_b4 = wv + b0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a4;
-      uint32_t x1 = wv_a4[i] + wv_b4[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a4;
-      uint32_t x1 = wv_a4[i] + y[i];
-      os[i] = x1;);
-    uint32_t *wv_a5 = wv + d10 * (uint32_t)4U;
-    uint32_t *wv_b5 = wv + a * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a5;
-      uint32_t x1 = wv_a5[i] ^ wv_b5[i];
-      os[i] = x1;);
-    uint32_t *r13 = wv_a5;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r13;
-      uint32_t x1 = r13[i];
-      uint32_t x10 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U;
-      os[i] = x10;);
-    uint32_t *wv_a6 = wv + c0 * (uint32_t)4U;
-    uint32_t *wv_b6 = wv + d10 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a6;
-      uint32_t x1 = wv_a6[i] + wv_b6[i];
-      os[i] = x1;);
-    uint32_t *wv_a7 = wv + b0 * (uint32_t)4U;
-    uint32_t *wv_b7 = wv + c0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a7;
-      uint32_t x1 = wv_a7[i] ^ wv_b7[i];
-      os[i] = x1;);
-    uint32_t *r14 = wv_a7;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r14;
-      uint32_t x1 = r14[i];
-      uint32_t x10 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U;
-      os[i] = x10;);
-    uint32_t *r15 = wv + (uint32_t)4U;
-    uint32_t *r21 = wv + (uint32_t)8U;
-    uint32_t *r31 = wv + (uint32_t)12U;
-    uint32_t *r110 = r15;
-    uint32_t x00 = r110[1U];
-    uint32_t x10 = r110[2U];
-    uint32_t x20 = r110[3U];
-    uint32_t x30 = r110[0U];
-    r110[0U] = x00;
-    r110[1U] = x10;
-    r110[2U] = x20;
-    r110[3U] = x30;
-    uint32_t *r111 = r21;
-    uint32_t x01 = r111[2U];
-    uint32_t x11 = r111[3U];
-    uint32_t x21 = r111[0U];
-    uint32_t x31 = r111[1U];
-    r111[0U] = x01;
-    r111[1U] = x11;
-    r111[2U] = x21;
-    r111[3U] = x31;
-    uint32_t *r112 = r31;
-    uint32_t x02 = r112[3U];
-    uint32_t x12 = r112[0U];
-    uint32_t x22 = r112[1U];
-    uint32_t x32 = r112[2U];
-    r112[0U] = x02;
-    r112[1U] = x12;
-    r112[2U] = x22;
-    r112[3U] = x32;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    uint32_t *wv_a = wv + a0 * (uint32_t)4U;
-    uint32_t *wv_b8 = wv + b * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a;
-      uint32_t x1 = wv_a[i] + wv_b8[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a;
-      uint32_t x1 = wv_a[i] + z[i];
-      os[i] = x1;);
-    uint32_t *wv_a8 = wv + d1 * (uint32_t)4U;
-    uint32_t *wv_b9 = wv + a0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a8;
-      uint32_t x1 = wv_a8[i] ^ wv_b9[i];
-      os[i] = x1;);
-    uint32_t *r16 = wv_a8;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r16;
-      uint32_t x1 = r16[i];
-      uint32_t x13 = x1 >> (uint32_t)16U | x1 << (uint32_t)16U;
-      os[i] = x13;);
-    uint32_t *wv_a9 = wv + c * (uint32_t)4U;
-    uint32_t *wv_b10 = wv + d1 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a9;
-      uint32_t x1 = wv_a9[i] + wv_b10[i];
-      os[i] = x1;);
-    uint32_t *wv_a10 = wv + b * (uint32_t)4U;
-    uint32_t *wv_b11 = wv + c * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a10;
-      uint32_t x1 = wv_a10[i] ^ wv_b11[i];
-      os[i] = x1;);
-    uint32_t *r17 = wv_a10;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r17;
-      uint32_t x1 = r17[i];
-      uint32_t x13 = x1 >> (uint32_t)12U | x1 << (uint32_t)20U;
-      os[i] = x13;);
-    uint32_t *wv_a11 = wv + a0 * (uint32_t)4U;
-    uint32_t *wv_b12 = wv + b * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a11;
-      uint32_t x1 = wv_a11[i] + wv_b12[i];
-      os[i] = x1;);
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a11;
-      uint32_t x1 = wv_a11[i] + w[i];
-      os[i] = x1;);
-    uint32_t *wv_a12 = wv + d1 * (uint32_t)4U;
-    uint32_t *wv_b13 = wv + a0 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a12;
-      uint32_t x1 = wv_a12[i] ^ wv_b13[i];
-      os[i] = x1;);
-    uint32_t *r18 = wv_a12;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r18;
-      uint32_t x1 = r18[i];
-      uint32_t x13 = x1 >> (uint32_t)8U | x1 << (uint32_t)24U;
-      os[i] = x13;);
-    uint32_t *wv_a13 = wv + c * (uint32_t)4U;
-    uint32_t *wv_b14 = wv + d1 * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a13;
-      uint32_t x1 = wv_a13[i] + wv_b14[i];
-      os[i] = x1;);
-    uint32_t *wv_a14 = wv + b * (uint32_t)4U;
-    uint32_t *wv_b = wv + c * (uint32_t)4U;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = wv_a14;
-      uint32_t x1 = wv_a14[i] ^ wv_b[i];
-      os[i] = x1;);
-    uint32_t *r19 = wv_a14;
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      uint32_t *os = r19;
-      uint32_t x1 = r19[i];
-      uint32_t x13 = x1 >> (uint32_t)7U | x1 << (uint32_t)25U;
-      os[i] = x13;);
-    uint32_t *r113 = wv + (uint32_t)4U;
-    uint32_t *r2 = wv + (uint32_t)8U;
-    uint32_t *r3 = wv + (uint32_t)12U;
-    uint32_t *r11 = r113;
-    uint32_t x03 = r11[3U];
-    uint32_t x13 = r11[0U];
-    uint32_t x23 = r11[1U];
-    uint32_t x33 = r11[2U];
-    r11[0U] = x03;
-    r11[1U] = x13;
-    r11[2U] = x23;
-    r11[3U] = x33;
-    uint32_t *r114 = r2;
-    uint32_t x04 = r114[2U];
-    uint32_t x14 = r114[3U];
-    uint32_t x24 = r114[0U];
-    uint32_t x34 = r114[1U];
-    r114[0U] = x04;
-    r114[1U] = x14;
-    r114[2U] = x24;
-    r114[3U] = x34;
-    uint32_t *r115 = r3;
-    uint32_t x0 = r115[1U];
-    uint32_t x1 = r115[2U];
-    uint32_t x2 = r115[3U];
-    uint32_t x3 = r115[0U];
-    r115[0U] = x0;
-    r115[1U] = x1;
-    r115[2U] = x2;
-    r115[3U] = x3;);
-  uint32_t *s0 = hash;
-  uint32_t *s1 = hash + (uint32_t)4U;
-  uint32_t *r0 = wv;
-  uint32_t *r1 = wv + (uint32_t)4U;
-  uint32_t *r2 = wv + (uint32_t)8U;
-  uint32_t *r3 = wv + (uint32_t)12U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = s0;
-    uint32_t x = s0[i] ^ r0[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = s0;
-    uint32_t x = s0[i] ^ r2[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = s1;
-    uint32_t x = s1[i] ^ r1[i];
-    os[i] = x;);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = s1;
-    uint32_t x = s1[i] ^ r3[i];
-    os[i] = x;);
-}
-
-void Hacl_Blake2s_32_blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn)
-{
-  uint32_t *r0 = hash;
-  uint32_t *r1 = hash + (uint32_t)4U;
-  uint32_t *r2 = hash + (uint32_t)8U;
-  uint32_t *r3 = hash + (uint32_t)12U;
-  uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U];
-  uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U];
-  uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U];
-  uint32_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_S[3U];
-  uint32_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_S[4U];
-  uint32_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_S[5U];
-  uint32_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_S[6U];
-  uint32_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_S[7U];
-  r2[0U] = iv0;
-  r2[1U] = iv1;
-  r2[2U] = iv2;
-  r2[3U] = iv3;
-  r3[0U] = iv4;
-  r3[1U] = iv5;
-  r3[2U] = iv6;
-  r3[3U] = iv7;
-  uint32_t kk_shift_8 = kk << (uint32_t)8U;
-  uint32_t iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ nn));
-  r0[0U] = iv0_;
-  r0[1U] = iv1;
-  r0[2U] = iv2;
-  r0[3U] = iv3;
-  r1[0U] = iv4;
-  r1[1U] = iv5;
-  r1[2U] = iv6;
-  r1[3U] = iv7;
-}
-
-void
-Hacl_Blake2s_32_blake2s_update_key(
-  uint32_t *wv,
-  uint32_t *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-)
-{
-  uint64_t lb = (uint64_t)(uint32_t)64U;
-  uint8_t b[64U] = { 0U };
-  memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
-  {
-    blake2s_update_block(wv, hash, true, lb, b);
-  }
-  else
-  {
-    blake2s_update_block(wv, hash, false, lb, b);
-  }
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
-}
-
-void
-Hacl_Blake2s_32_blake2s_update_multi(
-  uint32_t len,
-  uint32_t *wv,
-  uint32_t *hash,
-  uint64_t prev,
-  uint8_t *blocks,
-  uint32_t nb
-)
-{
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
-  {
-    uint64_t totlen = prev + (uint64_t)((i + (uint32_t)1U) * (uint32_t)64U);
-    uint8_t *b = blocks + i * (uint32_t)64U;
-    blake2s_update_block(wv, hash, false, totlen, b);
-  }
-}
-
-void
-Hacl_Blake2s_32_blake2s_update_last(
-  uint32_t len,
-  uint32_t *wv,
-  uint32_t *hash,
-  uint64_t prev,
-  uint32_t rem,
-  uint8_t *d
-)
-{
-  uint8_t b[64U] = { 0U };
-  uint8_t *last = d + len - rem;
-  memcpy(b, last, rem * sizeof (uint8_t));
-  uint64_t totlen = prev + (uint64_t)len;
-  blake2s_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
-}
-
-static void
-blake2s_update_blocks(
-  uint32_t len,
-  uint32_t *wv,
-  uint32_t *hash,
-  uint64_t prev,
-  uint8_t *blocks
-)
-{
-  uint32_t nb0 = len / (uint32_t)64U;
-  uint32_t rem0 = len % (uint32_t)64U;
-  K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
-  {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)64U;
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
-  }
-  else
-  {
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb0, .snd = rem0 });
-  }
-  uint32_t nb = scrut.fst;
-  uint32_t rem = scrut.snd;
-  Hacl_Blake2s_32_blake2s_update_multi(len, wv, hash, prev, blocks, nb);
-  Hacl_Blake2s_32_blake2s_update_last(len, wv, hash, prev, rem, blocks);
-}
-
-static inline void
-blake2s_update(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d)
-{
-  uint64_t lb = (uint64_t)(uint32_t)64U;
-  if (kk > (uint32_t)0U)
-  {
-    Hacl_Blake2s_32_blake2s_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
-    {
-      blake2s_update_blocks(ll, wv, hash, lb, d);
-      return;
-    }
-    return;
-  }
-  blake2s_update_blocks(ll, wv, hash, (uint64_t)(uint32_t)0U, d);
-}
-
-void Hacl_Blake2s_32_blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash)
-{
-  uint8_t b[32U] = { 0U };
-  uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)16U;
-  uint32_t *row0 = hash;
-  uint32_t *row1 = hash + (uint32_t)4U;
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store32_le(first + i * (uint32_t)4U, row0[i]););
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store32_le(second + i * (uint32_t)4U, row1[i]););
-  uint8_t *final = b;
-  memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)32U, uint8_t);
-}
-
-/**
-Write the BLAKE2s digest of message `d` using key `k` into `output`.
-
-@param nn Length of to-be-generated digest with 1 <= `nn` <= 32.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2s_32_blake2s(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-)
-{
-  uint32_t b[16U] = { 0U };
-  uint32_t b1[16U] = { 0U };
-  Hacl_Blake2s_32_blake2s_init(b, kk, nn);
-  blake2s_update(b1, b, kk, k, ll, d);
-  Hacl_Blake2s_32_blake2s_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)16U, uint32_t);
-  Lib_Memzero0_memzero(b, (uint32_t)16U, uint32_t);
-}
-
-uint32_t *Hacl_Blake2s_32_blake2s_malloc(void)
-{
-  uint32_t *buf = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
-  return buf;
-}
-
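The file removed above carried the doc comments for the one-shot entry points (Hacl_Blake2b_32_blake2b and Hacl_Blake2s_32_blake2s). As a worked illustration of the signature documented there, an unkeyed 64-byte BLAKE2b digest with the now-removed API looked like the sketch below; the header name is assumed, and the dummy key buffer only avoids passing a null pointer when kk is 0.

#include <stdint.h>
#include "Hacl_Hash_Blake2.h"   /* public header of the removed module; name assumed */

/* nn = digest length (1 <= nn <= 64), kk = key length (0 for unkeyed), per the doc comment above. */
static void
blake2b_digest(uint8_t out[64U], uint8_t *msg, uint32_t msg_len)
{
  uint8_t no_key[1U] = { 0U };               /* unused: kk == 0 selects the unkeyed mode */
  Hacl_Blake2b_32_blake2b(64U,               /* nn: digest length                        */
                          out,               /* output: nn bytes                         */
                          msg_len,           /* ll: message length                       */
                          msg,               /* d: message                               */
                          0U,                /* kk: no key                               */
                          no_key);           /* k: read for kk bytes only                */
}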
diff --git a/src/msvc/Hacl_Hash_Blake2b.c b/src/msvc/Hacl_Hash_Blake2b.c
new file mode 100644
index 00000000..2dceaf4b
--- /dev/null
+++ b/src/msvc/Hacl_Hash_Blake2b.c
@@ -0,0 +1,971 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_Hash_Blake2b.h"
+
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "lib_memzero0.h"
+
+static void
+update_block(uint64_t *wv, uint64_t *hash, bool flag, FStar_UInt128_uint128 totlen, uint8_t *d)
+{
+  uint64_t m_w[16U] = { 0U };
+  KRML_MAYBE_FOR16(i,
+    0U,
+    16U,
+    1U,
+    uint64_t *os = m_w;
+    uint8_t *bj = d + i * 8U;
+    uint64_t u = load64_le(bj);
+    uint64_t r = u;
+    uint64_t x = r;
+    os[i] = x;);
+  uint64_t mask[4U] = { 0U };
+  uint64_t wv_14;
+  if (flag)
+  {
+    wv_14 = 0xFFFFFFFFFFFFFFFFULL;
+  }
+  else
+  {
+    wv_14 = 0ULL;
+  }
+  uint64_t wv_15 = 0ULL;
+  mask[0U] = FStar_UInt128_uint128_to_uint64(totlen);
+  mask[1U] = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, 64U));
+  mask[2U] = wv_14;
+  mask[3U] = wv_15;
+  memcpy(wv, hash, 16U * sizeof (uint64_t));
+  uint64_t *wv3 = wv + 12U;
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = wv3;
+    uint64_t x = wv3[i] ^ mask[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR12(i0,
+    0U,
+    12U,
+    1U,
+    uint32_t start_idx = i0 % 10U * 16U;
+    uint64_t m_st[16U] = { 0U };
+    uint64_t *r0 = m_st;
+    uint64_t *r1 = m_st + 4U;
+    uint64_t *r20 = m_st + 8U;
+    uint64_t *r30 = m_st + 12U;
+    uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U];
+    uint64_t uu____0 = m_w[s2];
+    uint64_t uu____1 = m_w[s4];
+    uint64_t uu____2 = m_w[s6];
+    r0[0U] = m_w[s0];
+    r0[1U] = uu____0;
+    r0[2U] = uu____1;
+    r0[3U] = uu____2;
+    uint64_t uu____3 = m_w[s3];
+    uint64_t uu____4 = m_w[s5];
+    uint64_t uu____5 = m_w[s7];
+    r1[0U] = m_w[s1];
+    r1[1U] = uu____3;
+    r1[2U] = uu____4;
+    r1[3U] = uu____5;
+    uint64_t uu____6 = m_w[s10];
+    uint64_t uu____7 = m_w[s12];
+    uint64_t uu____8 = m_w[s14];
+    r20[0U] = m_w[s8];
+    r20[1U] = uu____6;
+    r20[2U] = uu____7;
+    r20[3U] = uu____8;
+    uint64_t uu____9 = m_w[s11];
+    uint64_t uu____10 = m_w[s13];
+    uint64_t uu____11 = m_w[s15];
+    r30[0U] = m_w[s9];
+    r30[1U] = uu____9;
+    r30[2U] = uu____10;
+    r30[3U] = uu____11;
+    uint64_t *x = m_st;
+    uint64_t *y = m_st + 4U;
+    uint64_t *z = m_st + 8U;
+    uint64_t *w = m_st + 12U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    uint64_t *wv_a0 = wv + a * 4U;
+    uint64_t *wv_b0 = wv + b0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a0;
+      uint64_t x1 = wv_a0[i] + wv_b0[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a0;
+      uint64_t x1 = wv_a0[i] + x[i];
+      os[i] = x1;);
+    uint64_t *wv_a1 = wv + d10 * 4U;
+    uint64_t *wv_b1 = wv + a * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a1;
+      uint64_t x1 = wv_a1[i] ^ wv_b1[i];
+      os[i] = x1;);
+    uint64_t *r10 = wv_a1;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r10;
+      uint64_t x1 = r10[i];
+      uint64_t x10 = x1 >> 32U | x1 << 32U;
+      os[i] = x10;);
+    uint64_t *wv_a2 = wv + c0 * 4U;
+    uint64_t *wv_b2 = wv + d10 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a2;
+      uint64_t x1 = wv_a2[i] + wv_b2[i];
+      os[i] = x1;);
+    uint64_t *wv_a3 = wv + b0 * 4U;
+    uint64_t *wv_b3 = wv + c0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a3;
+      uint64_t x1 = wv_a3[i] ^ wv_b3[i];
+      os[i] = x1;);
+    uint64_t *r12 = wv_a3;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r12;
+      uint64_t x1 = r12[i];
+      uint64_t x10 = x1 >> 24U | x1 << 40U;
+      os[i] = x10;);
+    uint64_t *wv_a4 = wv + a * 4U;
+    uint64_t *wv_b4 = wv + b0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a4;
+      uint64_t x1 = wv_a4[i] + wv_b4[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a4;
+      uint64_t x1 = wv_a4[i] + y[i];
+      os[i] = x1;);
+    uint64_t *wv_a5 = wv + d10 * 4U;
+    uint64_t *wv_b5 = wv + a * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a5;
+      uint64_t x1 = wv_a5[i] ^ wv_b5[i];
+      os[i] = x1;);
+    uint64_t *r13 = wv_a5;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r13;
+      uint64_t x1 = r13[i];
+      uint64_t x10 = x1 >> 16U | x1 << 48U;
+      os[i] = x10;);
+    uint64_t *wv_a6 = wv + c0 * 4U;
+    uint64_t *wv_b6 = wv + d10 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a6;
+      uint64_t x1 = wv_a6[i] + wv_b6[i];
+      os[i] = x1;);
+    uint64_t *wv_a7 = wv + b0 * 4U;
+    uint64_t *wv_b7 = wv + c0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a7;
+      uint64_t x1 = wv_a7[i] ^ wv_b7[i];
+      os[i] = x1;);
+    uint64_t *r14 = wv_a7;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r14;
+      uint64_t x1 = r14[i];
+      uint64_t x10 = x1 >> 63U | x1 << 1U;
+      os[i] = x10;);
+    uint64_t *r15 = wv + 4U;
+    uint64_t *r21 = wv + 8U;
+    uint64_t *r31 = wv + 12U;
+    uint64_t *r110 = r15;
+    uint64_t x00 = r110[1U];
+    uint64_t x10 = r110[2U];
+    uint64_t x20 = r110[3U];
+    uint64_t x30 = r110[0U];
+    r110[0U] = x00;
+    r110[1U] = x10;
+    r110[2U] = x20;
+    r110[3U] = x30;
+    uint64_t *r111 = r21;
+    uint64_t x01 = r111[2U];
+    uint64_t x11 = r111[3U];
+    uint64_t x21 = r111[0U];
+    uint64_t x31 = r111[1U];
+    r111[0U] = x01;
+    r111[1U] = x11;
+    r111[2U] = x21;
+    r111[3U] = x31;
+    uint64_t *r112 = r31;
+    uint64_t x02 = r112[3U];
+    uint64_t x12 = r112[0U];
+    uint64_t x22 = r112[1U];
+    uint64_t x32 = r112[2U];
+    r112[0U] = x02;
+    r112[1U] = x12;
+    r112[2U] = x22;
+    r112[3U] = x32;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    uint64_t *wv_a = wv + a0 * 4U;
+    uint64_t *wv_b8 = wv + b * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a;
+      uint64_t x1 = wv_a[i] + wv_b8[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a;
+      uint64_t x1 = wv_a[i] + z[i];
+      os[i] = x1;);
+    uint64_t *wv_a8 = wv + d1 * 4U;
+    uint64_t *wv_b9 = wv + a0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a8;
+      uint64_t x1 = wv_a8[i] ^ wv_b9[i];
+      os[i] = x1;);
+    uint64_t *r16 = wv_a8;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r16;
+      uint64_t x1 = r16[i];
+      uint64_t x13 = x1 >> 32U | x1 << 32U;
+      os[i] = x13;);
+    uint64_t *wv_a9 = wv + c * 4U;
+    uint64_t *wv_b10 = wv + d1 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a9;
+      uint64_t x1 = wv_a9[i] + wv_b10[i];
+      os[i] = x1;);
+    uint64_t *wv_a10 = wv + b * 4U;
+    uint64_t *wv_b11 = wv + c * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a10;
+      uint64_t x1 = wv_a10[i] ^ wv_b11[i];
+      os[i] = x1;);
+    uint64_t *r17 = wv_a10;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r17;
+      uint64_t x1 = r17[i];
+      uint64_t x13 = x1 >> 24U | x1 << 40U;
+      os[i] = x13;);
+    uint64_t *wv_a11 = wv + a0 * 4U;
+    uint64_t *wv_b12 = wv + b * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a11;
+      uint64_t x1 = wv_a11[i] + wv_b12[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a11;
+      uint64_t x1 = wv_a11[i] + w[i];
+      os[i] = x1;);
+    uint64_t *wv_a12 = wv + d1 * 4U;
+    uint64_t *wv_b13 = wv + a0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a12;
+      uint64_t x1 = wv_a12[i] ^ wv_b13[i];
+      os[i] = x1;);
+    uint64_t *r18 = wv_a12;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r18;
+      uint64_t x1 = r18[i];
+      uint64_t x13 = x1 >> 16U | x1 << 48U;
+      os[i] = x13;);
+    uint64_t *wv_a13 = wv + c * 4U;
+    uint64_t *wv_b14 = wv + d1 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a13;
+      uint64_t x1 = wv_a13[i] + wv_b14[i];
+      os[i] = x1;);
+    uint64_t *wv_a14 = wv + b * 4U;
+    uint64_t *wv_b = wv + c * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = wv_a14;
+      uint64_t x1 = wv_a14[i] ^ wv_b[i];
+      os[i] = x1;);
+    uint64_t *r19 = wv_a14;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint64_t *os = r19;
+      uint64_t x1 = r19[i];
+      uint64_t x13 = x1 >> 63U | x1 << 1U;
+      os[i] = x13;);
+    uint64_t *r113 = wv + 4U;
+    uint64_t *r2 = wv + 8U;
+    uint64_t *r3 = wv + 12U;
+    uint64_t *r11 = r113;
+    uint64_t x03 = r11[3U];
+    uint64_t x13 = r11[0U];
+    uint64_t x23 = r11[1U];
+    uint64_t x33 = r11[2U];
+    r11[0U] = x03;
+    r11[1U] = x13;
+    r11[2U] = x23;
+    r11[3U] = x33;
+    uint64_t *r114 = r2;
+    uint64_t x04 = r114[2U];
+    uint64_t x14 = r114[3U];
+    uint64_t x24 = r114[0U];
+    uint64_t x34 = r114[1U];
+    r114[0U] = x04;
+    r114[1U] = x14;
+    r114[2U] = x24;
+    r114[3U] = x34;
+    uint64_t *r115 = r3;
+    uint64_t x0 = r115[1U];
+    uint64_t x1 = r115[2U];
+    uint64_t x2 = r115[3U];
+    uint64_t x3 = r115[0U];
+    r115[0U] = x0;
+    r115[1U] = x1;
+    r115[2U] = x2;
+    r115[3U] = x3;);
+  uint64_t *s0 = hash;
+  uint64_t *s1 = hash + 4U;
+  uint64_t *r0 = wv;
+  uint64_t *r1 = wv + 4U;
+  uint64_t *r2 = wv + 8U;
+  uint64_t *r3 = wv + 12U;
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = s0;
+    uint64_t x = s0[i] ^ r0[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = s0;
+    uint64_t x = s0[i] ^ r2[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = s1;
+    uint64_t x = s1[i] ^ r1[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = s1;
+    uint64_t x = s1[i] ^ r3[i];
+    os[i] = x;);
+}
+
+void Hacl_Hash_Blake2b_init(uint64_t *hash, uint32_t kk, uint32_t nn)
+{
+  uint64_t *r0 = hash;
+  uint64_t *r1 = hash + 4U;
+  uint64_t *r2 = hash + 8U;
+  uint64_t *r3 = hash + 12U;
+  uint64_t iv0 = Hacl_Hash_Blake2s_ivTable_B[0U];
+  uint64_t iv1 = Hacl_Hash_Blake2s_ivTable_B[1U];
+  uint64_t iv2 = Hacl_Hash_Blake2s_ivTable_B[2U];
+  uint64_t iv3 = Hacl_Hash_Blake2s_ivTable_B[3U];
+  uint64_t iv4 = Hacl_Hash_Blake2s_ivTable_B[4U];
+  uint64_t iv5 = Hacl_Hash_Blake2s_ivTable_B[5U];
+  uint64_t iv6 = Hacl_Hash_Blake2s_ivTable_B[6U];
+  uint64_t iv7 = Hacl_Hash_Blake2s_ivTable_B[7U];
+  r2[0U] = iv0;
+  r2[1U] = iv1;
+  r2[2U] = iv2;
+  r2[3U] = iv3;
+  r3[0U] = iv4;
+  r3[1U] = iv5;
+  r3[2U] = iv6;
+  r3[3U] = iv7;
+  uint64_t kk_shift_8 = (uint64_t)kk << 8U;
+  uint64_t iv0_ = iv0 ^ (0x01010000ULL ^ (kk_shift_8 ^ (uint64_t)nn));
+  r0[0U] = iv0_;
+  r0[1U] = iv1;
+  r0[2U] = iv2;
+  r0[3U] = iv3;
+  r1[0U] = iv4;
+  r1[1U] = iv5;
+  r1[2U] = iv6;
+  r1[3U] = iv7;
+}
+
+static void update_key(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, uint32_t ll)
+{
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
+  uint8_t b[128U] = { 0U };
+  memcpy(b, k, kk * sizeof (uint8_t));
+  if (ll == 0U)
+  {
+    update_block(wv, hash, true, lb, b);
+  }
+  else
+  {
+    update_block(wv, hash, false, lb, b);
+  }
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
+}
+
+void
+Hacl_Hash_Blake2b_update_multi(
+  uint32_t len,
+  uint64_t *wv,
+  uint64_t *hash,
+  FStar_UInt128_uint128 prev,
+  uint8_t *blocks,
+  uint32_t nb
+)
+{
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
+  {
+    FStar_UInt128_uint128
+    totlen =
+      FStar_UInt128_add_mod(prev,
+        FStar_UInt128_uint64_to_uint128((uint64_t)((i + 1U) * 128U)));
+    uint8_t *b = blocks + i * 128U;
+    update_block(wv, hash, false, totlen, b);
+  }
+}
+
+void
+Hacl_Hash_Blake2b_update_last(
+  uint32_t len,
+  uint64_t *wv,
+  uint64_t *hash,
+  FStar_UInt128_uint128 prev,
+  uint32_t rem,
+  uint8_t *d
+)
+{
+  uint8_t b[128U] = { 0U };
+  uint8_t *last = d + len - rem;
+  memcpy(b, last, rem * sizeof (uint8_t));
+  FStar_UInt128_uint128
+  totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len));
+  update_block(wv, hash, true, totlen, b);
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
+}
+
+static void
+update_blocks(
+  uint32_t len,
+  uint64_t *wv,
+  uint64_t *hash,
+  FStar_UInt128_uint128 prev,
+  uint8_t *blocks
+)
+{
+  uint32_t nb0 = len / 128U;
+  uint32_t rem0 = len % 128U;
+  uint32_t nb;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    nb = nb0 - 1U;
+  }
+  else
+  {
+    nb = nb0;
+  }
+  uint32_t rem;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    rem = 128U;
+  }
+  else
+  {
+    rem = rem0;
+  }
+  Hacl_Hash_Blake2b_update_multi(len, wv, hash, prev, blocks, nb);
+  Hacl_Hash_Blake2b_update_last(len, wv, hash, prev, rem, blocks);
+}
+
+static inline void
+update(uint64_t *wv, uint64_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d)
+{
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
+  if (kk > 0U)
+  {
+    update_key(wv, hash, kk, k, ll);
+    if (!(ll == 0U))
+    {
+      update_blocks(ll, wv, hash, lb, d);
+      return;
+    }
+    return;
+  }
+  update_blocks(ll, wv, hash, FStar_UInt128_uint64_to_uint128((uint64_t)0U), d);
+}
+
+void Hacl_Hash_Blake2b_finish(uint32_t nn, uint8_t *output, uint64_t *hash)
+{
+  uint8_t b[64U] = { 0U };
+  uint8_t *first = b;
+  uint8_t *second = b + 32U;
+  uint64_t *row0 = hash;
+  uint64_t *row1 = hash + 4U;
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(first + i * 8U, row0[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_le(second + i * 8U, row1[i]););
+  uint8_t *final = b;
+  memcpy(output, final, nn * sizeof (uint8_t));
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
+}
+
+/**
+  State allocation function when there is no key
+*/
+Hacl_Hash_Blake2b_state_t *Hacl_Hash_Blake2b_malloc(void)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
+  uint64_t *b = (uint64_t *)KRML_HOST_CALLOC(16U, sizeof (uint64_t));
+  Hacl_Hash_Blake2b_block_state_t block_state = { .fst = wv, .snd = b };
+  Hacl_Hash_Blake2b_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  Hacl_Hash_Blake2b_state_t
+  *p = (Hacl_Hash_Blake2b_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2b_state_t));
+  p[0U] = s;
+  Hacl_Hash_Blake2b_init(block_state.snd, 0U, 64U);
+  return p;
+}
+
+/**
+  Re-initialization function when there is no key
+*/
+void Hacl_Hash_Blake2b_reset(Hacl_Hash_Blake2b_state_t *state)
+{
+  Hacl_Hash_Blake2b_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state;
+  Hacl_Hash_Blake2b_init(block_state.snd, 0U, 64U);
+  Hacl_Hash_Blake2b_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
+}
+
+/**
+  Update function when there is no key; returns Hacl_Streaming_Types_Success on success and Hacl_Streaming_Types_MaximumLengthExceeded if the accumulated input length would exceed 2^64 - 1 bytes
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Hash_Blake2b_update(Hacl_Hash_Blake2b_state_t *state, uint8_t *chunk, uint32_t chunk_len)
+{
+  Hacl_Hash_Blake2b_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
+  {
+    sz = 128U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)128U);
+  }
+  if (chunk_len <= 128U - sz)
+  {
+    Hacl_Hash_Blake2b_state_t s1 = *state;
+    Hacl_Hash_Blake2b_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 128U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_Hash_Blake2b_state_t s1 = *state;
+    Hacl_Hash_Blake2b_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 128U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      uint64_t *wv = block_state1.fst;
+      uint64_t *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2b_update_multi(128U,
+        wv,
+        hash,
+        FStar_UInt128_uint64_to_uint128(prevlen),
+        buf,
+        nb);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)128U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 128U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)128U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    uint64_t *wv = block_state1.fst;
+    uint64_t *hash = block_state1.snd;
+    uint32_t nb = data1_len / 128U;
+    Hacl_Hash_Blake2b_update_multi(data1_len,
+      wv,
+      hash,
+      FStar_UInt128_uint64_to_uint128(total_len1),
+      data1,
+      nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 128U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Hash_Blake2b_state_t s1 = *state;
+    Hacl_Hash_Blake2b_block_state_t block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 128U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)128U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2
+        }
+      );
+    Hacl_Hash_Blake2b_state_t s10 = *state;
+    Hacl_Hash_Blake2b_block_state_t block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 128U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      uint64_t *wv = block_state1.fst;
+      uint64_t *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2b_update_multi(128U,
+        wv,
+        hash,
+        FStar_UInt128_uint64_to_uint128(prevlen),
+        buf,
+        nb);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)128U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 128U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)128U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    uint64_t *wv = block_state1.fst;
+    uint64_t *hash = block_state1.snd;
+    uint32_t nb = data1_len / 128U;
+    Hacl_Hash_Blake2b_update_multi(data1_len,
+      wv,
+      hash,
+      FStar_UInt128_uint64_to_uint128(total_len1),
+      data1,
+      nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
+
+/**
+  Finish function when there is no key
+*/
+void Hacl_Hash_Blake2b_digest(Hacl_Hash_Blake2b_state_t *state, uint8_t *output)
+{
+  Hacl_Hash_Blake2b_state_t scrut = *state;
+  Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint32_t r;
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
+  {
+    r = 128U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)128U);
+  }
+  uint8_t *buf_1 = buf_;
+  uint64_t wv0[16U] = { 0U };
+  uint64_t b[16U] = { 0U };
+  Hacl_Hash_Blake2b_block_state_t tmp_block_state = { .fst = wv0, .snd = b };
+  uint64_t *src_b = block_state.snd;
+  uint64_t *dst_b = tmp_block_state.snd;
+  memcpy(dst_b, src_b, 16U * sizeof (uint64_t));
+  uint64_t prev_len = total_len - (uint64_t)r;
+  uint32_t ite;
+  if (r % 128U == 0U && r > 0U)
+  {
+    ite = 128U;
+  }
+  else
+  {
+    ite = r % 128U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite;
+  uint8_t *buf_multi = buf_1;
+  uint64_t *wv1 = tmp_block_state.fst;
+  uint64_t *hash0 = tmp_block_state.snd;
+  uint32_t nb = 0U;
+  Hacl_Hash_Blake2b_update_multi(0U,
+    wv1,
+    hash0,
+    FStar_UInt128_uint64_to_uint128(prev_len),
+    buf_multi,
+    nb);
+  uint64_t prev_len_last = total_len - (uint64_t)r;
+  uint64_t *wv = tmp_block_state.fst;
+  uint64_t *hash = tmp_block_state.snd;
+  Hacl_Hash_Blake2b_update_last(r,
+    wv,
+    hash,
+    FStar_UInt128_uint64_to_uint128(prev_len_last),
+    r,
+    buf_last);
+  Hacl_Hash_Blake2b_finish(64U, output, tmp_block_state.snd);
+}
+
+/**
+  Free state function when there is no key
+*/
+void Hacl_Hash_Blake2b_free(Hacl_Hash_Blake2b_state_t *state)
+{
+  Hacl_Hash_Blake2b_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2b_block_state_t block_state = scrut.block_state;
+  uint64_t *wv = block_state.fst;
+  uint64_t *b = block_state.snd;
+  KRML_HOST_FREE(wv);
+  KRML_HOST_FREE(b);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
+
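
A minimal caller sketch for the streaming API above (Hacl_Hash_Blake2b_malloc / _update / _digest / _reset / _free), offered as illustration only: it is not part of the generated sources, and it assumes the public declarations are exposed through a Hacl_Hash_Blake2b.h header.

    #include <stdint.h>
    #include "Hacl_Hash_Blake2b.h"   /* assumed public header for these declarations */

    /* Hash two messages with a single state: allocate once, digest the first
       message, reset, digest the second, then free.  The unkeyed streaming API
       always produces a 64-byte digest (see Hacl_Hash_Blake2b_digest above). */
    static int example_blake2b_streaming(void)
    {
      uint8_t msg1[] = "first message";
      uint8_t msg2[] = "second message";
      uint8_t out1[64U] = { 0U };
      uint8_t out2[64U] = { 0U };
      Hacl_Hash_Blake2b_state_t *st = Hacl_Hash_Blake2b_malloc();
      Hacl_Streaming_Types_error_code
      rc = Hacl_Hash_Blake2b_update(st, msg1, (uint32_t)(sizeof msg1 - 1U));
      if (rc != Hacl_Streaming_Types_Success)
      {
        Hacl_Hash_Blake2b_free(st);
        return 1;
      }
      Hacl_Hash_Blake2b_digest(st, out1);
      Hacl_Hash_Blake2b_reset(st);
      rc = Hacl_Hash_Blake2b_update(st, msg2, (uint32_t)(sizeof msg2 - 1U));
      if (rc != Hacl_Streaming_Types_Success)
      {
        Hacl_Hash_Blake2b_free(st);
        return 1;
      }
      Hacl_Hash_Blake2b_digest(st, out2);
      Hacl_Hash_Blake2b_free(st);
      return 0;
    }

Only the update calls report an error code; Hacl_Hash_Blake2b_digest returns void, so no further checks are needed once the updates succeed.
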
+/**
+Write the BLAKE2b digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 64.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
+void
+Hacl_Hash_Blake2b_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
+)
+{
+  uint64_t b[16U] = { 0U };
+  uint64_t b1[16U] = { 0U };
+  Hacl_Hash_Blake2b_init(b, key_len, output_len);
+  update(b1, b, key_len, key, input_len, input);
+  Hacl_Hash_Blake2b_finish(output_len, output, b);
+  Lib_Memzero0_memzero(b1, 16U, uint64_t);
+  Lib_Memzero0_memzero(b, 16U, uint64_t);
+}
+
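
A matching one-shot sketch for Hacl_Hash_Blake2b_hash_with_key, staying within the documented bounds (1 <= output_len <= 64, key_len may be 0); the header name is again an assumption, not confirmed by this hunk.

    #include <stdint.h>
    #include "Hacl_Hash_Blake2b.h"   /* assumed public header */

    /* One-shot keyed hash: 32-byte digest of "abc" under a 16-byte demo key.
       Passing key_len = 0 selects plain, unkeyed BLAKE2b. */
    static void example_blake2b_one_shot(void)
    {
      uint8_t key[16U] = { 0x0bU };   /* demo key; BLAKE2b keys are at most 64 bytes */
      uint8_t msg[] = "abc";
      uint8_t out[32U] = { 0U };
      Hacl_Hash_Blake2b_hash_with_key(out, 32U, msg, (uint32_t)(sizeof msg - 1U), key, 16U);
    }
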
diff --git a/src/msvc/Hacl_Hash_Blake2b_256.c b/src/msvc/Hacl_Hash_Blake2b_256.c
deleted file mode 100644
index b37ffc5f..00000000
--- a/src/msvc/Hacl_Hash_Blake2b_256.c
+++ /dev/null
@@ -1,499 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Hash_Blake2b_256.h"
-
-#include "internal/Hacl_Impl_Blake2_Constants.h"
-#include "internal/Hacl_Hash_Blake2.h"
-#include "lib_memzero0.h"
-
-static inline void
-blake2b_update_block(
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  bool flag,
-  FStar_UInt128_uint128 totlen,
-  uint8_t *d
-)
-{
-  uint64_t m_w[16U] = { 0U };
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint64_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)8U;
-    uint64_t u = load64_le(bj);
-    uint64_t r = u;
-    uint64_t x = r;
-    os[i] = x;);
-  Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_zero;
-  uint64_t wv_14;
-  if (flag)
-  {
-    wv_14 = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  }
-  else
-  {
-    wv_14 = (uint64_t)0U;
-  }
-  uint64_t wv_15 = (uint64_t)0U;
-  mask =
-    Lib_IntVector_Intrinsics_vec256_load64s(FStar_UInt128_uint128_to_uint64(totlen),
-      FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, (uint32_t)64U)),
-      wv_14,
-      wv_15);
-  memcpy(wv, hash, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Lib_IntVector_Intrinsics_vec256 *wv3 = wv + (uint32_t)3U;
-  wv3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv3[0U], mask);
-  KRML_MAYBE_FOR12(i,
-    (uint32_t)0U,
-    (uint32_t)12U,
-    (uint32_t)1U,
-    uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U;
-    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 m_st[4U] KRML_POST_ALIGN(32) = { 0U };
-    Lib_IntVector_Intrinsics_vec256 *r0 = m_st;
-    Lib_IntVector_Intrinsics_vec256 *r1 = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *r20 = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *r30 = m_st + (uint32_t)3U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
-    r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]);
-    r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]);
-    r20[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]);
-    r30[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]);
-    Lib_IntVector_Intrinsics_vec256 *x = m_st;
-    Lib_IntVector_Intrinsics_vec256 *y = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *z = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *w = m_st + (uint32_t)3U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec256 *wv_a0 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b0 = wv + b0 * (uint32_t)1U;
-    wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], wv_b0[0U]);
-    wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], x[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a1 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b1 = wv + a * (uint32_t)1U;
-    wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a1[0U], wv_b1[0U]);
-    wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a1[0U], (uint32_t)32U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a2 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b2 = wv + d10 * (uint32_t)1U;
-    wv_a2[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a2[0U], wv_b2[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a3 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b3 = wv + c0 * (uint32_t)1U;
-    wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a3[0U], wv_b3[0U]);
-    wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a3[0U], (uint32_t)24U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a4 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b4 = wv + b0 * (uint32_t)1U;
-    wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], wv_b4[0U]);
-    wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], y[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a5 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b5 = wv + a * (uint32_t)1U;
-    wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a5[0U], wv_b5[0U]);
-    wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a5[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a6 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b6 = wv + d10 * (uint32_t)1U;
-    wv_a6[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a6[0U], wv_b6[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a7 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b7 = wv + c0 * (uint32_t)1U;
-    wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a7[0U], wv_b7[0U]);
-    wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a7[0U], (uint32_t)63U);
-    Lib_IntVector_Intrinsics_vec256 *r10 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *r21 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *r31 = wv + (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec256 v00 = r10[0U];
-    Lib_IntVector_Intrinsics_vec256
-    v1 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v00, (uint32_t)1U);
-    r10[0U] = v1;
-    Lib_IntVector_Intrinsics_vec256 v01 = r21[0U];
-    Lib_IntVector_Intrinsics_vec256
-    v10 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v01, (uint32_t)2U);
-    r21[0U] = v10;
-    Lib_IntVector_Intrinsics_vec256 v02 = r31[0U];
-    Lib_IntVector_Intrinsics_vec256
-    v11 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v02, (uint32_t)3U);
-    r31[0U] = v11;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec256 *wv_a = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b8 = wv + b * (uint32_t)1U;
-    wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], wv_b8[0U]);
-    wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], z[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a8 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b9 = wv + a0 * (uint32_t)1U;
-    wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a8[0U], wv_b9[0U]);
-    wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a8[0U], (uint32_t)32U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a9 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b10 = wv + d1 * (uint32_t)1U;
-    wv_a9[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a9[0U], wv_b10[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a10 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b11 = wv + c * (uint32_t)1U;
-    wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a10[0U], wv_b11[0U]);
-    wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a10[0U], (uint32_t)24U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a11 = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b12 = wv + b * (uint32_t)1U;
-    wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], wv_b12[0U]);
-    wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], w[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a12 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b13 = wv + a0 * (uint32_t)1U;
-    wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a12[0U], wv_b13[0U]);
-    wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a12[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec256 *wv_a13 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b14 = wv + d1 * (uint32_t)1U;
-    wv_a13[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a13[0U], wv_b14[0U]);
-    Lib_IntVector_Intrinsics_vec256 *wv_a14 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *wv_b = wv + c * (uint32_t)1U;
-    wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a14[0U], wv_b[0U]);
-    wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a14[0U], (uint32_t)63U);
-    Lib_IntVector_Intrinsics_vec256 *r11 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec256 *r2 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec256 *r3 = wv + (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec256 v0 = r11[0U];
-    Lib_IntVector_Intrinsics_vec256
-    v12 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v0, (uint32_t)3U);
-    r11[0U] = v12;
-    Lib_IntVector_Intrinsics_vec256 v03 = r2[0U];
-    Lib_IntVector_Intrinsics_vec256
-    v13 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v03, (uint32_t)2U);
-    r2[0U] = v13;
-    Lib_IntVector_Intrinsics_vec256 v04 = r3[0U];
-    Lib_IntVector_Intrinsics_vec256
-    v14 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v04, (uint32_t)1U);
-    r3[0U] = v14;);
-  Lib_IntVector_Intrinsics_vec256 *s0 = hash;
-  Lib_IntVector_Intrinsics_vec256 *s1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r0 = wv;
-  Lib_IntVector_Intrinsics_vec256 *r1 = wv + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = wv + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = wv + (uint32_t)3U;
-  s0[0U] = Lib_IntVector_Intrinsics_vec256_xor(s0[0U], r0[0U]);
-  s0[0U] = Lib_IntVector_Intrinsics_vec256_xor(s0[0U], r2[0U]);
-  s1[0U] = Lib_IntVector_Intrinsics_vec256_xor(s1[0U], r1[0U]);
-  s1[0U] = Lib_IntVector_Intrinsics_vec256_xor(s1[0U], r3[0U]);
-}
-
-void
-Hacl_Blake2b_256_blake2b_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, uint32_t nn)
-{
-  Lib_IntVector_Intrinsics_vec256 *r0 = hash;
-  Lib_IntVector_Intrinsics_vec256 *r1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = hash + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = hash + (uint32_t)3U;
-  uint64_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_B[0U];
-  uint64_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_B[1U];
-  uint64_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_B[2U];
-  uint64_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_B[3U];
-  uint64_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_B[4U];
-  uint64_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_B[5U];
-  uint64_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_B[6U];
-  uint64_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_B[7U];
-  r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3);
-  r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7);
-  uint64_t kk_shift_8 = (uint64_t)kk << (uint32_t)8U;
-  uint64_t iv0_ = iv0 ^ ((uint64_t)0x01010000U ^ (kk_shift_8 ^ (uint64_t)nn));
-  r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1, iv2, iv3);
-  r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7);
-}
-
-void
-Hacl_Blake2b_256_blake2b_update_key(
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-)
-{
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
-  uint8_t b[128U] = { 0U };
-  memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
-  {
-    blake2b_update_block(wv, hash, true, lb, b);
-  }
-  else
-  {
-    blake2b_update_block(wv, hash, false, lb, b);
-  }
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
-}
-
-void
-Hacl_Blake2b_256_blake2b_update_multi(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  FStar_UInt128_uint128 prev,
-  uint8_t *blocks,
-  uint32_t nb
-)
-{
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
-  {
-    FStar_UInt128_uint128
-    totlen =
-      FStar_UInt128_add_mod(prev,
-        FStar_UInt128_uint64_to_uint128((uint64_t)((i + (uint32_t)1U) * (uint32_t)128U)));
-    uint8_t *b = blocks + i * (uint32_t)128U;
-    blake2b_update_block(wv, hash, false, totlen, b);
-  }
-}
-
-void
-Hacl_Blake2b_256_blake2b_update_last(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  FStar_UInt128_uint128 prev,
-  uint32_t rem,
-  uint8_t *d
-)
-{
-  uint8_t b[128U] = { 0U };
-  uint8_t *last = d + len - rem;
-  memcpy(b, last, rem * sizeof (uint8_t));
-  FStar_UInt128_uint128
-  totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len));
-  blake2b_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)128U, uint8_t);
-}
-
-static inline void
-blake2b_update_blocks(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  FStar_UInt128_uint128 prev,
-  uint8_t *blocks
-)
-{
-  uint32_t nb0 = len / (uint32_t)128U;
-  uint32_t rem0 = len % (uint32_t)128U;
-  K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
-  {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)128U;
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
-  }
-  else
-  {
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb0, .snd = rem0 });
-  }
-  uint32_t nb = scrut.fst;
-  uint32_t rem = scrut.snd;
-  Hacl_Blake2b_256_blake2b_update_multi(len, wv, hash, prev, blocks, nb);
-  Hacl_Blake2b_256_blake2b_update_last(len, wv, hash, prev, rem, blocks);
-}
-
-static inline void
-blake2b_update(
-  Lib_IntVector_Intrinsics_vec256 *wv,
-  Lib_IntVector_Intrinsics_vec256 *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll,
-  uint8_t *d
-)
-{
-  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)128U);
-  if (kk > (uint32_t)0U)
-  {
-    Hacl_Blake2b_256_blake2b_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
-    {
-      blake2b_update_blocks(ll, wv, hash, lb, d);
-      return;
-    }
-    return;
-  }
-  blake2b_update_blocks(ll,
-    wv,
-    hash,
-    FStar_UInt128_uint64_to_uint128((uint64_t)(uint32_t)0U),
-    d);
-}
-
-void
-Hacl_Blake2b_256_blake2b_finish(
-  uint32_t nn,
-  uint8_t *output,
-  Lib_IntVector_Intrinsics_vec256 *hash
-)
-{
-  uint8_t b[64U] = { 0U };
-  uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)32U;
-  Lib_IntVector_Intrinsics_vec256 *row0 = hash;
-  Lib_IntVector_Intrinsics_vec256 *row1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256_store64_le(first, row0[0U]);
-  Lib_IntVector_Intrinsics_vec256_store64_le(second, row1[0U]);
-  uint8_t *final = b;
-  memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
-}
-
-/**
-Write the BLAKE2b digest of message `d` using key `k` into `output`.
-
-@param nn Length of the to-be-generated digest with 1 <= `nn` <= 64.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2b_256_blake2b(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-)
-{
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U };
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b1[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Blake2b_256_blake2b_init(b, kk, nn);
-  blake2b_update(b1, b, kk, k, ll, d);
-  Hacl_Blake2b_256_blake2b_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)4U, Lib_IntVector_Intrinsics_vec256);
-  Lib_Memzero0_memzero(b, (uint32_t)4U, Lib_IntVector_Intrinsics_vec256);
-}
-
-void
-Hacl_Blake2b_256_load_state256b_from_state32(
-  Lib_IntVector_Intrinsics_vec256 *st,
-  uint64_t *st32
-)
-{
-  Lib_IntVector_Intrinsics_vec256 *r0 = st;
-  Lib_IntVector_Intrinsics_vec256 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = st + (uint32_t)3U;
-  uint64_t *b0 = st32;
-  uint64_t *b1 = st32 + (uint32_t)4U;
-  uint64_t *b2 = st32 + (uint32_t)8U;
-  uint64_t *b3 = st32 + (uint32_t)12U;
-  r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b0[0U], b0[1U], b0[2U], b0[3U]);
-  r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b1[0U], b1[1U], b1[2U], b1[3U]);
-  r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b2[0U], b2[1U], b2[2U], b2[3U]);
-  r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b3[0U], b3[1U], b3[2U], b3[3U]);
-}
-
-void
-Hacl_Blake2b_256_store_state256b_to_state32(
-  uint64_t *st32,
-  Lib_IntVector_Intrinsics_vec256 *st
-)
-{
-  Lib_IntVector_Intrinsics_vec256 *r0 = st;
-  Lib_IntVector_Intrinsics_vec256 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec256 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec256 *r3 = st + (uint32_t)3U;
-  uint64_t *b0 = st32;
-  uint64_t *b1 = st32 + (uint32_t)4U;
-  uint64_t *b2 = st32 + (uint32_t)8U;
-  uint64_t *b3 = st32 + (uint32_t)12U;
-  uint8_t b8[32U] = { 0U };
-  Lib_IntVector_Intrinsics_vec256_store64_le(b8, r0[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = b0;
-    uint8_t *bj = b8 + i * (uint32_t)8U;
-    uint64_t u = load64_le(bj);
-    uint64_t r = u;
-    uint64_t x = r;
-    os[i] = x;);
-  uint8_t b80[32U] = { 0U };
-  Lib_IntVector_Intrinsics_vec256_store64_le(b80, r1[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = b1;
-    uint8_t *bj = b80 + i * (uint32_t)8U;
-    uint64_t u = load64_le(bj);
-    uint64_t r = u;
-    uint64_t x = r;
-    os[i] = x;);
-  uint8_t b81[32U] = { 0U };
-  Lib_IntVector_Intrinsics_vec256_store64_le(b81, r2[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = b2;
-    uint8_t *bj = b81 + i * (uint32_t)8U;
-    uint64_t u = load64_le(bj);
-    uint64_t r = u;
-    uint64_t x = r;
-    os[i] = x;);
-  uint8_t b82[32U] = { 0U };
-  Lib_IntVector_Intrinsics_vec256_store64_le(b82, r3[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t *os = b3;
-    uint8_t *bj = b82 + i * (uint32_t)8U;
-    uint64_t u = load64_le(bj);
-    uint64_t r = u;
-    uint64_t x = r;
-    os[i] = x;);
-}
-
-Lib_IntVector_Intrinsics_vec256 *Hacl_Blake2b_256_blake2b_malloc(void)
-{
-  Lib_IntVector_Intrinsics_vec256
-  *buf =
-    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U);
-  memset(buf, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  return buf;
-}
-
diff --git a/src/msvc/Hacl_Hash_Blake2b_Simd256.c b/src/msvc/Hacl_Hash_Blake2b_Simd256.c
new file mode 100644
index 00000000..1a5e8cf2
--- /dev/null
+++ b/src/msvc/Hacl_Hash_Blake2b_Simd256.c
@@ -0,0 +1,828 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_Hash_Blake2b_Simd256.h"
+
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "lib_memzero0.h"
+
+static inline void
+update_block(
+  Lib_IntVector_Intrinsics_vec256 *wv,
+  Lib_IntVector_Intrinsics_vec256 *hash,
+  bool flag,
+  FStar_UInt128_uint128 totlen,
+  uint8_t *d
+)
+{
+  uint64_t m_w[16U] = { 0U };
+  KRML_MAYBE_FOR16(i,
+    0U,
+    16U,
+    1U,
+    uint64_t *os = m_w;
+    uint8_t *bj = d + i * 8U;
+    uint64_t u = load64_le(bj);
+    uint64_t r = u;
+    uint64_t x = r;
+    os[i] = x;);
+  Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_zero;
+  uint64_t wv_14;
+  if (flag)
+  {
+    wv_14 = 0xFFFFFFFFFFFFFFFFULL;
+  }
+  else
+  {
+    wv_14 = 0ULL;
+  }
+  uint64_t wv_15 = 0ULL;
+  mask =
+    Lib_IntVector_Intrinsics_vec256_load64s(FStar_UInt128_uint128_to_uint64(totlen),
+      FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(totlen, 64U)),
+      wv_14,
+      wv_15);
+  memcpy(wv, hash, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Lib_IntVector_Intrinsics_vec256 *wv3 = wv + 3U;
+  wv3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv3[0U], mask);
+  KRML_MAYBE_FOR12(i,
+    0U,
+    12U,
+    1U,
+    uint32_t start_idx = i % 10U * 16U;
+    KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 m_st[4U] KRML_POST_ALIGN(32) = { 0U };
+    Lib_IntVector_Intrinsics_vec256 *r0 = m_st;
+    Lib_IntVector_Intrinsics_vec256 *r1 = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec256 *r20 = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec256 *r30 = m_st + 3U;
+    uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U];
+    r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]);
+    r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]);
+    r20[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]);
+    r30[0U] = Lib_IntVector_Intrinsics_vec256_load64s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]);
+    Lib_IntVector_Intrinsics_vec256 *x = m_st;
+    Lib_IntVector_Intrinsics_vec256 *y = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec256 *z = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec256 *w = m_st + 3U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a0 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b0 = wv + b0 * 1U;
+    wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], wv_b0[0U]);
+    wv_a0[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a0[0U], x[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a1 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b1 = wv + a * 1U;
+    wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a1[0U], wv_b1[0U]);
+    wv_a1[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a1[0U], 32U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a2 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b2 = wv + d10 * 1U;
+    wv_a2[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a2[0U], wv_b2[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a3 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b3 = wv + c0 * 1U;
+    wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a3[0U], wv_b3[0U]);
+    wv_a3[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a3[0U], 24U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a4 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b4 = wv + b0 * 1U;
+    wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], wv_b4[0U]);
+    wv_a4[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a4[0U], y[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a5 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b5 = wv + a * 1U;
+    wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a5[0U], wv_b5[0U]);
+    wv_a5[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a5[0U], 16U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a6 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b6 = wv + d10 * 1U;
+    wv_a6[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a6[0U], wv_b6[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a7 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b7 = wv + c0 * 1U;
+    wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a7[0U], wv_b7[0U]);
+    wv_a7[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a7[0U], 63U);
+    Lib_IntVector_Intrinsics_vec256 *r10 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec256 *r21 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec256 *r31 = wv + 3U;
+    Lib_IntVector_Intrinsics_vec256 v00 = r10[0U];
+    Lib_IntVector_Intrinsics_vec256
+    v1 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v00, 1U);
+    r10[0U] = v1;
+    Lib_IntVector_Intrinsics_vec256 v01 = r21[0U];
+    Lib_IntVector_Intrinsics_vec256
+    v10 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v01, 2U);
+    r21[0U] = v10;
+    Lib_IntVector_Intrinsics_vec256 v02 = r31[0U];
+    Lib_IntVector_Intrinsics_vec256
+    v11 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v02, 3U);
+    r31[0U] = v11;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    Lib_IntVector_Intrinsics_vec256 *wv_a = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b8 = wv + b * 1U;
+    wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], wv_b8[0U]);
+    wv_a[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a[0U], z[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a8 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b9 = wv + a0 * 1U;
+    wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a8[0U], wv_b9[0U]);
+    wv_a8[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a8[0U], 32U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a9 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b10 = wv + d1 * 1U;
+    wv_a9[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a9[0U], wv_b10[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a10 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b11 = wv + c * 1U;
+    wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a10[0U], wv_b11[0U]);
+    wv_a10[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a10[0U], 24U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a11 = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b12 = wv + b * 1U;
+    wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], wv_b12[0U]);
+    wv_a11[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a11[0U], w[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a12 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b13 = wv + a0 * 1U;
+    wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a12[0U], wv_b13[0U]);
+    wv_a12[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a12[0U], 16U);
+    Lib_IntVector_Intrinsics_vec256 *wv_a13 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b14 = wv + d1 * 1U;
+    wv_a13[0U] = Lib_IntVector_Intrinsics_vec256_add64(wv_a13[0U], wv_b14[0U]);
+    Lib_IntVector_Intrinsics_vec256 *wv_a14 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec256 *wv_b = wv + c * 1U;
+    wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_xor(wv_a14[0U], wv_b[0U]);
+    wv_a14[0U] = Lib_IntVector_Intrinsics_vec256_rotate_right64(wv_a14[0U], 63U);
+    Lib_IntVector_Intrinsics_vec256 *r11 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec256 *r2 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec256 *r3 = wv + 3U;
+    Lib_IntVector_Intrinsics_vec256 v0 = r11[0U];
+    Lib_IntVector_Intrinsics_vec256
+    v12 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v0, 3U);
+    r11[0U] = v12;
+    Lib_IntVector_Intrinsics_vec256 v03 = r2[0U];
+    Lib_IntVector_Intrinsics_vec256
+    v13 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v03, 2U);
+    r2[0U] = v13;
+    Lib_IntVector_Intrinsics_vec256 v04 = r3[0U];
+    Lib_IntVector_Intrinsics_vec256
+    v14 = Lib_IntVector_Intrinsics_vec256_rotate_right_lanes64(v04, 1U);
+    r3[0U] = v14;);
+  Lib_IntVector_Intrinsics_vec256 *s0 = hash;
+  Lib_IntVector_Intrinsics_vec256 *s1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r0 = wv;
+  Lib_IntVector_Intrinsics_vec256 *r1 = wv + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = wv + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = wv + 3U;
+  s0[0U] = Lib_IntVector_Intrinsics_vec256_xor(s0[0U], r0[0U]);
+  s0[0U] = Lib_IntVector_Intrinsics_vec256_xor(s0[0U], r2[0U]);
+  s1[0U] = Lib_IntVector_Intrinsics_vec256_xor(s1[0U], r1[0U]);
+  s1[0U] = Lib_IntVector_Intrinsics_vec256_xor(s1[0U], r3[0U]);
+}
+
+void
+Hacl_Hash_Blake2b_Simd256_init(Lib_IntVector_Intrinsics_vec256 *hash, uint32_t kk, uint32_t nn)
+{
+  Lib_IntVector_Intrinsics_vec256 *r0 = hash;
+  Lib_IntVector_Intrinsics_vec256 *r1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = hash + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = hash + 3U;
+  uint64_t iv0 = Hacl_Hash_Blake2s_ivTable_B[0U];
+  uint64_t iv1 = Hacl_Hash_Blake2s_ivTable_B[1U];
+  uint64_t iv2 = Hacl_Hash_Blake2s_ivTable_B[2U];
+  uint64_t iv3 = Hacl_Hash_Blake2s_ivTable_B[3U];
+  uint64_t iv4 = Hacl_Hash_Blake2s_ivTable_B[4U];
+  uint64_t iv5 = Hacl_Hash_Blake2s_ivTable_B[5U];
+  uint64_t iv6 = Hacl_Hash_Blake2s_ivTable_B[6U];
+  uint64_t iv7 = Hacl_Hash_Blake2s_ivTable_B[7U];
+  r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0, iv1, iv2, iv3);
+  r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7);
+  uint64_t kk_shift_8 = (uint64_t)kk << 8U;
+  uint64_t iv0_ = iv0 ^ (0x01010000ULL ^ (kk_shift_8 ^ (uint64_t)nn));
+  r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv0_, iv1, iv2, iv3);
+  r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(iv4, iv5, iv6, iv7);
+}
+
+static void
+update_key(
+  Lib_IntVector_Intrinsics_vec256 *wv,
+  Lib_IntVector_Intrinsics_vec256 *hash,
+  uint32_t kk,
+  uint8_t *k,
+  uint32_t ll
+)
+{
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
+  uint8_t b[128U] = { 0U };
+  memcpy(b, k, kk * sizeof (uint8_t));
+  if (ll == 0U)
+  {
+    update_block(wv, hash, true, lb, b);
+  }
+  else
+  {
+    update_block(wv, hash, false, lb, b);
+  }
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
+}
+
+void
+Hacl_Hash_Blake2b_Simd256_update_multi(
+  uint32_t len,
+  Lib_IntVector_Intrinsics_vec256 *wv,
+  Lib_IntVector_Intrinsics_vec256 *hash,
+  FStar_UInt128_uint128 prev,
+  uint8_t *blocks,
+  uint32_t nb
+)
+{
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
+  {
+    FStar_UInt128_uint128
+    totlen =
+      FStar_UInt128_add_mod(prev,
+        FStar_UInt128_uint64_to_uint128((uint64_t)((i + 1U) * 128U)));
+    uint8_t *b = blocks + i * 128U;
+    update_block(wv, hash, false, totlen, b);
+  }
+}
+
+void
+Hacl_Hash_Blake2b_Simd256_update_last(
+  uint32_t len,
+  Lib_IntVector_Intrinsics_vec256 *wv,
+  Lib_IntVector_Intrinsics_vec256 *hash,
+  FStar_UInt128_uint128 prev,
+  uint32_t rem,
+  uint8_t *d
+)
+{
+  uint8_t b[128U] = { 0U };
+  uint8_t *last = d + len - rem;
+  memcpy(b, last, rem * sizeof (uint8_t));
+  FStar_UInt128_uint128
+  totlen = FStar_UInt128_add_mod(prev, FStar_UInt128_uint64_to_uint128((uint64_t)len));
+  update_block(wv, hash, true, totlen, b);
+  Lib_Memzero0_memzero(b, 128U, uint8_t);
+}
+
+static inline void
+update_blocks(
+  uint32_t len,
+  Lib_IntVector_Intrinsics_vec256 *wv,
+  Lib_IntVector_Intrinsics_vec256 *hash,
+  FStar_UInt128_uint128 prev,
+  uint8_t *blocks
+)
+{
+  uint32_t nb0 = len / 128U;
+  uint32_t rem0 = len % 128U;
+  uint32_t nb;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    nb = nb0 - 1U;
+  }
+  else
+  {
+    nb = nb0;
+  }
+  uint32_t rem;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    rem = 128U;
+  }
+  else
+  {
+    rem = rem0;
+  }
+  Hacl_Hash_Blake2b_Simd256_update_multi(len, wv, hash, prev, blocks, nb);
+  Hacl_Hash_Blake2b_Simd256_update_last(len, wv, hash, prev, rem, blocks);
+}
+
+static inline void
+update(
+  Lib_IntVector_Intrinsics_vec256 *wv,
+  Lib_IntVector_Intrinsics_vec256 *hash,
+  uint32_t kk,
+  uint8_t *k,
+  uint32_t ll,
+  uint8_t *d
+)
+{
+  FStar_UInt128_uint128 lb = FStar_UInt128_uint64_to_uint128((uint64_t)128U);
+  if (kk > 0U)
+  {
+    update_key(wv, hash, kk, k, ll);
+    if (!(ll == 0U))
+    {
+      update_blocks(ll, wv, hash, lb, d);
+      return;
+    }
+    return;
+  }
+  update_blocks(ll, wv, hash, FStar_UInt128_uint64_to_uint128((uint64_t)0U), d);
+}
+
+void
+Hacl_Hash_Blake2b_Simd256_finish(
+  uint32_t nn,
+  uint8_t *output,
+  Lib_IntVector_Intrinsics_vec256 *hash
+)
+{
+  uint8_t b[64U] = { 0U };
+  uint8_t *first = b;
+  uint8_t *second = b + 32U;
+  Lib_IntVector_Intrinsics_vec256 *row0 = hash;
+  Lib_IntVector_Intrinsics_vec256 *row1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec256_store64_le(first, row0[0U]);
+  Lib_IntVector_Intrinsics_vec256_store64_le(second, row1[0U]);
+  uint8_t *final = b;
+  memcpy(output, final, nn * sizeof (uint8_t));
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
+}
+
+void
+Hacl_Hash_Blake2b_Simd256_load_state256b_from_state32(
+  Lib_IntVector_Intrinsics_vec256 *st,
+  uint64_t *st32
+)
+{
+  Lib_IntVector_Intrinsics_vec256 *r0 = st;
+  Lib_IntVector_Intrinsics_vec256 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = st + 3U;
+  uint64_t *b0 = st32;
+  uint64_t *b1 = st32 + 4U;
+  uint64_t *b2 = st32 + 8U;
+  uint64_t *b3 = st32 + 12U;
+  r0[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b0[0U], b0[1U], b0[2U], b0[3U]);
+  r1[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b1[0U], b1[1U], b1[2U], b1[3U]);
+  r2[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b2[0U], b2[1U], b2[2U], b2[3U]);
+  r3[0U] = Lib_IntVector_Intrinsics_vec256_load64s(b3[0U], b3[1U], b3[2U], b3[3U]);
+}
+
+void
+Hacl_Hash_Blake2b_Simd256_store_state256b_to_state32(
+  uint64_t *st32,
+  Lib_IntVector_Intrinsics_vec256 *st
+)
+{
+  Lib_IntVector_Intrinsics_vec256 *r0 = st;
+  Lib_IntVector_Intrinsics_vec256 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec256 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec256 *r3 = st + 3U;
+  uint64_t *b0 = st32;
+  uint64_t *b1 = st32 + 4U;
+  uint64_t *b2 = st32 + 8U;
+  uint64_t *b3 = st32 + 12U;
+  uint8_t b8[32U] = { 0U };
+  Lib_IntVector_Intrinsics_vec256_store64_le(b8, r0[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = b0;
+    uint8_t *bj = b8 + i * 8U;
+    uint64_t u = load64_le(bj);
+    uint64_t r = u;
+    uint64_t x = r;
+    os[i] = x;);
+  uint8_t b80[32U] = { 0U };
+  Lib_IntVector_Intrinsics_vec256_store64_le(b80, r1[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = b1;
+    uint8_t *bj = b80 + i * 8U;
+    uint64_t u = load64_le(bj);
+    uint64_t r = u;
+    uint64_t x = r;
+    os[i] = x;);
+  uint8_t b81[32U] = { 0U };
+  Lib_IntVector_Intrinsics_vec256_store64_le(b81, r2[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = b2;
+    uint8_t *bj = b81 + i * 8U;
+    uint64_t u = load64_le(bj);
+    uint64_t r = u;
+    uint64_t x = r;
+    os[i] = x;);
+  uint8_t b82[32U] = { 0U };
+  Lib_IntVector_Intrinsics_vec256_store64_le(b82, r3[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint64_t *os = b3;
+    uint8_t *bj = b82 + i * 8U;
+    uint64_t u = load64_le(bj);
+    uint64_t r = u;
+    uint64_t x = r;
+    os[i] = x;);
+}
+
+Lib_IntVector_Intrinsics_vec256 *Hacl_Hash_Blake2b_Simd256_malloc_with_key(void)
+{
+  Lib_IntVector_Intrinsics_vec256
+  *buf =
+    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
+  memset(buf, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  return buf;
+}
+
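
The 256-bit state helpers defined above (Hacl_Hash_Blake2b_Simd256_malloc_with_key, _init, and the load/store conversions to the flat 16-word representation) can be exercised on their own. A small sketch follows; KRML_ALIGNED_FREE is assumed to be the krmllib counterpart of KRML_ALIGNED_MALLOC, and the vec256 intrinsics used by this file require a target with 256-bit vector support.

    #include <stdint.h>
    #include "internal/Hacl_Hash_Blake2b_Simd256.h"   /* same header this file includes */

    /* Round-trip a SIMD-256 BLAKE2b state through its flat uint64_t[16] form.
       Each vec256 row holds four 64-bit state words, so rows 0..3 map onto
       st32[0..3], st32[4..7], st32[8..11] and st32[12..15] respectively. */
    static void example_simd256_state_roundtrip(void)
    {
      Lib_IntVector_Intrinsics_vec256 *st = Hacl_Hash_Blake2b_Simd256_malloc_with_key();
      uint64_t st32[16U] = { 0U };
      Hacl_Hash_Blake2b_Simd256_init(st, 0U, 64U);   /* unkeyed, 64-byte digest length */
      Hacl_Hash_Blake2b_Simd256_store_state256b_to_state32(st32, st);
      Hacl_Hash_Blake2b_Simd256_load_state256b_from_state32(st, st32);
      KRML_ALIGNED_FREE(st);   /* assumed deallocator matching KRML_ALIGNED_MALLOC */
    }
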
+/**
+  State allocation function when there is no key
+*/
+Hacl_Hash_Blake2b_Simd256_state_t *Hacl_Hash_Blake2b_Simd256_malloc(void)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  Lib_IntVector_Intrinsics_vec256
+  *wv =
+    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
+  memset(wv, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Lib_IntVector_Intrinsics_vec256
+  *b =
+    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 4U);
+  memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Hacl_Hash_Blake2b_Simd256_block_state_t block_state = { .fst = wv, .snd = b };
+  Hacl_Hash_Blake2b_Simd256_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  Hacl_Hash_Blake2b_Simd256_state_t
+  *p =
+    (Hacl_Hash_Blake2b_Simd256_state_t *)KRML_HOST_MALLOC(sizeof (
+        Hacl_Hash_Blake2b_Simd256_state_t
+      ));
+  p[0U] = s;
+  Hacl_Hash_Blake2b_Simd256_init(block_state.snd, 0U, 64U);
+  return p;
+}
+
+/**
+  Re-initialization function when there is no key
+*/
+void Hacl_Hash_Blake2b_Simd256_reset(Hacl_Hash_Blake2b_Simd256_state_t *state)
+{
+  Hacl_Hash_Blake2b_Simd256_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state;
+  Hacl_Hash_Blake2b_Simd256_init(block_state.snd, 0U, 64U);
+  Hacl_Hash_Blake2b_Simd256_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
+}
+
+/**
+  Update function when there is no key; returns Hacl_Streaming_Types_Success on success and Hacl_Streaming_Types_MaximumLengthExceeded if the accumulated input length would exceed 2^64 - 1 bytes
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Hash_Blake2b_Simd256_update(
+  Hacl_Hash_Blake2b_Simd256_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
+)
+{
+  Hacl_Hash_Blake2b_Simd256_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
+  {
+    sz = 128U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)128U);
+  }
+  if (chunk_len <= 128U - sz)
+  {
+    Hacl_Hash_Blake2b_Simd256_state_t s1 = *state;
+    Hacl_Hash_Blake2b_Simd256_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 128U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_Simd256_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_Hash_Blake2b_Simd256_state_t s1 = *state;
+    Hacl_Hash_Blake2b_Simd256_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 128U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
+      Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2b_Simd256_update_multi(128U,
+        wv,
+        hash,
+        FStar_UInt128_uint64_to_uint128(prevlen),
+        buf,
+        nb);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)128U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 128U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)128U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
+    Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
+    uint32_t nb = data1_len / 128U;
+    Hacl_Hash_Blake2b_Simd256_update_multi(data1_len,
+      wv,
+      hash,
+      FStar_UInt128_uint64_to_uint128(total_len1),
+      data1,
+      nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_Simd256_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 128U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Hash_Blake2b_Simd256_state_t s1 = *state;
+    Hacl_Hash_Blake2b_Simd256_block_state_t block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 128U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)128U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_Simd256_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2
+        }
+      );
+    Hacl_Hash_Blake2b_Simd256_state_t s10 = *state;
+    Hacl_Hash_Blake2b_Simd256_block_state_t block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 128U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
+      Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2b_Simd256_update_multi(128U,
+        wv,
+        hash,
+        FStar_UInt128_uint64_to_uint128(prevlen),
+        buf,
+        nb);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)128U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 128U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)128U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
+    Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
+    uint32_t nb = data1_len / 128U;
+    Hacl_Hash_Blake2b_Simd256_update_multi(data1_len,
+      wv,
+      hash,
+      FStar_UInt128_uint64_to_uint128(total_len1),
+      data1,
+      nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2b_Simd256_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
+
+/**
+  Finish function when there is no key
+*/
+void
+Hacl_Hash_Blake2b_Simd256_digest(Hacl_Hash_Blake2b_Simd256_state_t *state, uint8_t *output)
+{
+  Hacl_Hash_Blake2b_Simd256_state_t scrut = *state;
+  Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint32_t r;
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
+  {
+    r = 128U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)128U);
+  }
+  uint8_t *buf_1 = buf_;
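+  /* Finalization runs on a stack copy of the chaining state, so the caller's
+     streaming state is left untouched and can continue to absorb data. */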
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U };
+  Hacl_Hash_Blake2b_Simd256_block_state_t tmp_block_state = { .fst = wv0, .snd = b };
+  Lib_IntVector_Intrinsics_vec256 *src_b = block_state.snd;
+  Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.snd;
+  memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  uint64_t prev_len = total_len - (uint64_t)r;
+  uint32_t ite;
+  if (r % 128U == 0U && r > 0U)
+  {
+    ite = 128U;
+  }
+  else
+  {
+    ite = r % 128U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite;
+  uint8_t *buf_multi = buf_1;
+  Lib_IntVector_Intrinsics_vec256 *wv1 = tmp_block_state.fst;
+  Lib_IntVector_Intrinsics_vec256 *hash0 = tmp_block_state.snd;
+  uint32_t nb = 0U;
+  Hacl_Hash_Blake2b_Simd256_update_multi(0U,
+    wv1,
+    hash0,
+    FStar_UInt128_uint64_to_uint128(prev_len),
+    buf_multi,
+    nb);
+  uint64_t prev_len_last = total_len - (uint64_t)r;
+  Lib_IntVector_Intrinsics_vec256 *wv = tmp_block_state.fst;
+  Lib_IntVector_Intrinsics_vec256 *hash = tmp_block_state.snd;
+  Hacl_Hash_Blake2b_Simd256_update_last(r,
+    wv,
+    hash,
+    FStar_UInt128_uint64_to_uint128(prev_len_last),
+    r,
+    buf_last);
+  Hacl_Hash_Blake2b_Simd256_finish(64U, output, tmp_block_state.snd);
+}
+
+/**
+  Free state function when there is no key
+*/
+void Hacl_Hash_Blake2b_Simd256_free(Hacl_Hash_Blake2b_Simd256_state_t *state)
+{
+  Hacl_Hash_Blake2b_Simd256_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2b_Simd256_block_state_t block_state = scrut.block_state;
+  Lib_IntVector_Intrinsics_vec256 *wv = block_state.fst;
+  Lib_IntVector_Intrinsics_vec256 *b = block_state.snd;
+  KRML_ALIGNED_FREE(wv);
+  KRML_ALIGNED_FREE(b);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
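+/*
+  Usage sketch for the streaming API above (illustrative only; `part1`, `part2`
+  and their lengths stand for caller-provided data):
+
+    uint8_t digest[64U];
+    Hacl_Hash_Blake2b_Simd256_state_t *st = Hacl_Hash_Blake2b_Simd256_malloc();
+    if (Hacl_Hash_Blake2b_Simd256_update(st, part1, part1_len) == Hacl_Streaming_Types_Success)
+    {
+      Hacl_Hash_Blake2b_Simd256_update(st, part2, part2_len);
+      Hacl_Hash_Blake2b_Simd256_digest(st, digest);
+    }
+    Hacl_Hash_Blake2b_Simd256_free(st);
+*/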
+
+/**
+Write the BLAKE2b digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 64.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
+void
+Hacl_Hash_Blake2b_Simd256_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
+)
+{
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U };
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b1[4U] KRML_POST_ALIGN(32) = { 0U };
+  Hacl_Hash_Blake2b_Simd256_init(b, key_len, output_len);
+  update(b1, b, key_len, key, input_len, input);
+  Hacl_Hash_Blake2b_Simd256_finish(output_len, output, b);
+  Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec256);
+  Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec256);
+}
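+/*
+  One-shot usage sketch (illustrative; `msg` and `msg_len` stand for
+  caller-provided data): an unkeyed 64-byte BLAKE2b digest.
+
+    uint8_t out[64U];
+    Hacl_Hash_Blake2b_Simd256_hash_with_key(out, 64U, msg, msg_len, NULL, 0U);
+*/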
+
diff --git a/src/msvc/Hacl_Hash_Blake2s.c b/src/msvc/Hacl_Hash_Blake2s.c
new file mode 100644
index 00000000..652c3f33
--- /dev/null
+++ b/src/msvc/Hacl_Hash_Blake2s.c
@@ -0,0 +1,931 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_Hash_Blake2s.h"
+
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "lib_memzero0.h"
+
+static inline void
+update_block(uint32_t *wv, uint32_t *hash, bool flag, uint64_t totlen, uint8_t *d)
+{
+  uint32_t m_w[16U] = { 0U };
+  KRML_MAYBE_FOR16(i,
+    0U,
+    16U,
+    1U,
+    uint32_t *os = m_w;
+    uint8_t *bj = d + i * 4U;
+    uint32_t u = load32_le(bj);
+    uint32_t r = u;
+    uint32_t x = r;
+    os[i] = x;);
+  uint32_t mask[4U] = { 0U };
+  uint32_t wv_14;
+  if (flag)
+  {
+    wv_14 = 0xFFFFFFFFU;
+  }
+  else
+  {
+    wv_14 = 0U;
+  }
+  uint32_t wv_15 = 0U;
+  mask[0U] = (uint32_t)totlen;
+  mask[1U] = (uint32_t)(totlen >> 32U);
+  mask[2U] = wv_14;
+  mask[3U] = wv_15;
+  memcpy(wv, hash, 16U * sizeof (uint32_t));
+  uint32_t *wv3 = wv + 12U;
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = wv3;
+    uint32_t x = wv3[i] ^ mask[i];
+    os[i] = x;);
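+  /* Ten BLAKE2s rounds: each round mixes the columns and then the diagonals of
+     the 4x4 state with the G function (rotations 16, 12, 8, 7), picking message
+     words through sigmaTable. */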
+  KRML_MAYBE_FOR10(i0,
+    0U,
+    10U,
+    1U,
+    uint32_t start_idx = i0 % 10U * 16U;
+    uint32_t m_st[16U] = { 0U };
+    uint32_t *r0 = m_st;
+    uint32_t *r1 = m_st + 4U;
+    uint32_t *r20 = m_st + 8U;
+    uint32_t *r30 = m_st + 12U;
+    uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U];
+    uint32_t uu____0 = m_w[s2];
+    uint32_t uu____1 = m_w[s4];
+    uint32_t uu____2 = m_w[s6];
+    r0[0U] = m_w[s0];
+    r0[1U] = uu____0;
+    r0[2U] = uu____1;
+    r0[3U] = uu____2;
+    uint32_t uu____3 = m_w[s3];
+    uint32_t uu____4 = m_w[s5];
+    uint32_t uu____5 = m_w[s7];
+    r1[0U] = m_w[s1];
+    r1[1U] = uu____3;
+    r1[2U] = uu____4;
+    r1[3U] = uu____5;
+    uint32_t uu____6 = m_w[s10];
+    uint32_t uu____7 = m_w[s12];
+    uint32_t uu____8 = m_w[s14];
+    r20[0U] = m_w[s8];
+    r20[1U] = uu____6;
+    r20[2U] = uu____7;
+    r20[3U] = uu____8;
+    uint32_t uu____9 = m_w[s11];
+    uint32_t uu____10 = m_w[s13];
+    uint32_t uu____11 = m_w[s15];
+    r30[0U] = m_w[s9];
+    r30[1U] = uu____9;
+    r30[2U] = uu____10;
+    r30[3U] = uu____11;
+    uint32_t *x = m_st;
+    uint32_t *y = m_st + 4U;
+    uint32_t *z = m_st + 8U;
+    uint32_t *w = m_st + 12U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    uint32_t *wv_a0 = wv + a * 4U;
+    uint32_t *wv_b0 = wv + b0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a0;
+      uint32_t x1 = wv_a0[i] + wv_b0[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a0;
+      uint32_t x1 = wv_a0[i] + x[i];
+      os[i] = x1;);
+    uint32_t *wv_a1 = wv + d10 * 4U;
+    uint32_t *wv_b1 = wv + a * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a1;
+      uint32_t x1 = wv_a1[i] ^ wv_b1[i];
+      os[i] = x1;);
+    uint32_t *r10 = wv_a1;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r10;
+      uint32_t x1 = r10[i];
+      uint32_t x10 = x1 >> 16U | x1 << 16U;
+      os[i] = x10;);
+    uint32_t *wv_a2 = wv + c0 * 4U;
+    uint32_t *wv_b2 = wv + d10 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a2;
+      uint32_t x1 = wv_a2[i] + wv_b2[i];
+      os[i] = x1;);
+    uint32_t *wv_a3 = wv + b0 * 4U;
+    uint32_t *wv_b3 = wv + c0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a3;
+      uint32_t x1 = wv_a3[i] ^ wv_b3[i];
+      os[i] = x1;);
+    uint32_t *r12 = wv_a3;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r12;
+      uint32_t x1 = r12[i];
+      uint32_t x10 = x1 >> 12U | x1 << 20U;
+      os[i] = x10;);
+    uint32_t *wv_a4 = wv + a * 4U;
+    uint32_t *wv_b4 = wv + b0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a4;
+      uint32_t x1 = wv_a4[i] + wv_b4[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a4;
+      uint32_t x1 = wv_a4[i] + y[i];
+      os[i] = x1;);
+    uint32_t *wv_a5 = wv + d10 * 4U;
+    uint32_t *wv_b5 = wv + a * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a5;
+      uint32_t x1 = wv_a5[i] ^ wv_b5[i];
+      os[i] = x1;);
+    uint32_t *r13 = wv_a5;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r13;
+      uint32_t x1 = r13[i];
+      uint32_t x10 = x1 >> 8U | x1 << 24U;
+      os[i] = x10;);
+    uint32_t *wv_a6 = wv + c0 * 4U;
+    uint32_t *wv_b6 = wv + d10 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a6;
+      uint32_t x1 = wv_a6[i] + wv_b6[i];
+      os[i] = x1;);
+    uint32_t *wv_a7 = wv + b0 * 4U;
+    uint32_t *wv_b7 = wv + c0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a7;
+      uint32_t x1 = wv_a7[i] ^ wv_b7[i];
+      os[i] = x1;);
+    uint32_t *r14 = wv_a7;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r14;
+      uint32_t x1 = r14[i];
+      uint32_t x10 = x1 >> 7U | x1 << 25U;
+      os[i] = x10;);
+    uint32_t *r15 = wv + 4U;
+    uint32_t *r21 = wv + 8U;
+    uint32_t *r31 = wv + 12U;
+    uint32_t *r110 = r15;
+    uint32_t x00 = r110[1U];
+    uint32_t x10 = r110[2U];
+    uint32_t x20 = r110[3U];
+    uint32_t x30 = r110[0U];
+    r110[0U] = x00;
+    r110[1U] = x10;
+    r110[2U] = x20;
+    r110[3U] = x30;
+    uint32_t *r111 = r21;
+    uint32_t x01 = r111[2U];
+    uint32_t x11 = r111[3U];
+    uint32_t x21 = r111[0U];
+    uint32_t x31 = r111[1U];
+    r111[0U] = x01;
+    r111[1U] = x11;
+    r111[2U] = x21;
+    r111[3U] = x31;
+    uint32_t *r112 = r31;
+    uint32_t x02 = r112[3U];
+    uint32_t x12 = r112[0U];
+    uint32_t x22 = r112[1U];
+    uint32_t x32 = r112[2U];
+    r112[0U] = x02;
+    r112[1U] = x12;
+    r112[2U] = x22;
+    r112[3U] = x32;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    uint32_t *wv_a = wv + a0 * 4U;
+    uint32_t *wv_b8 = wv + b * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a;
+      uint32_t x1 = wv_a[i] + wv_b8[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a;
+      uint32_t x1 = wv_a[i] + z[i];
+      os[i] = x1;);
+    uint32_t *wv_a8 = wv + d1 * 4U;
+    uint32_t *wv_b9 = wv + a0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a8;
+      uint32_t x1 = wv_a8[i] ^ wv_b9[i];
+      os[i] = x1;);
+    uint32_t *r16 = wv_a8;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r16;
+      uint32_t x1 = r16[i];
+      uint32_t x13 = x1 >> 16U | x1 << 16U;
+      os[i] = x13;);
+    uint32_t *wv_a9 = wv + c * 4U;
+    uint32_t *wv_b10 = wv + d1 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a9;
+      uint32_t x1 = wv_a9[i] + wv_b10[i];
+      os[i] = x1;);
+    uint32_t *wv_a10 = wv + b * 4U;
+    uint32_t *wv_b11 = wv + c * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a10;
+      uint32_t x1 = wv_a10[i] ^ wv_b11[i];
+      os[i] = x1;);
+    uint32_t *r17 = wv_a10;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r17;
+      uint32_t x1 = r17[i];
+      uint32_t x13 = x1 >> 12U | x1 << 20U;
+      os[i] = x13;);
+    uint32_t *wv_a11 = wv + a0 * 4U;
+    uint32_t *wv_b12 = wv + b * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a11;
+      uint32_t x1 = wv_a11[i] + wv_b12[i];
+      os[i] = x1;);
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a11;
+      uint32_t x1 = wv_a11[i] + w[i];
+      os[i] = x1;);
+    uint32_t *wv_a12 = wv + d1 * 4U;
+    uint32_t *wv_b13 = wv + a0 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a12;
+      uint32_t x1 = wv_a12[i] ^ wv_b13[i];
+      os[i] = x1;);
+    uint32_t *r18 = wv_a12;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r18;
+      uint32_t x1 = r18[i];
+      uint32_t x13 = x1 >> 8U | x1 << 24U;
+      os[i] = x13;);
+    uint32_t *wv_a13 = wv + c * 4U;
+    uint32_t *wv_b14 = wv + d1 * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a13;
+      uint32_t x1 = wv_a13[i] + wv_b14[i];
+      os[i] = x1;);
+    uint32_t *wv_a14 = wv + b * 4U;
+    uint32_t *wv_b = wv + c * 4U;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = wv_a14;
+      uint32_t x1 = wv_a14[i] ^ wv_b[i];
+      os[i] = x1;);
+    uint32_t *r19 = wv_a14;
+    KRML_MAYBE_FOR4(i,
+      0U,
+      4U,
+      1U,
+      uint32_t *os = r19;
+      uint32_t x1 = r19[i];
+      uint32_t x13 = x1 >> 7U | x1 << 25U;
+      os[i] = x13;);
+    uint32_t *r113 = wv + 4U;
+    uint32_t *r2 = wv + 8U;
+    uint32_t *r3 = wv + 12U;
+    uint32_t *r11 = r113;
+    uint32_t x03 = r11[3U];
+    uint32_t x13 = r11[0U];
+    uint32_t x23 = r11[1U];
+    uint32_t x33 = r11[2U];
+    r11[0U] = x03;
+    r11[1U] = x13;
+    r11[2U] = x23;
+    r11[3U] = x33;
+    uint32_t *r114 = r2;
+    uint32_t x04 = r114[2U];
+    uint32_t x14 = r114[3U];
+    uint32_t x24 = r114[0U];
+    uint32_t x34 = r114[1U];
+    r114[0U] = x04;
+    r114[1U] = x14;
+    r114[2U] = x24;
+    r114[3U] = x34;
+    uint32_t *r115 = r3;
+    uint32_t x0 = r115[1U];
+    uint32_t x1 = r115[2U];
+    uint32_t x2 = r115[3U];
+    uint32_t x3 = r115[0U];
+    r115[0U] = x0;
+    r115[1U] = x1;
+    r115[2U] = x2;
+    r115[3U] = x3;);
+  uint32_t *s0 = hash;
+  uint32_t *s1 = hash + 4U;
+  uint32_t *r0 = wv;
+  uint32_t *r1 = wv + 4U;
+  uint32_t *r2 = wv + 8U;
+  uint32_t *r3 = wv + 12U;
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = s0;
+    uint32_t x = s0[i] ^ r0[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = s0;
+    uint32_t x = s0[i] ^ r2[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = s1;
+    uint32_t x = s1[i] ^ r1[i];
+    os[i] = x;);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = s1;
+    uint32_t x = s1[i] ^ r3[i];
+    os[i] = x;);
+}
+
+void Hacl_Hash_Blake2s_init(uint32_t *hash, uint32_t kk, uint32_t nn)
+{
+  uint32_t *r0 = hash;
+  uint32_t *r1 = hash + 4U;
+  uint32_t *r2 = hash + 8U;
+  uint32_t *r3 = hash + 12U;
+  uint32_t iv0 = Hacl_Hash_Blake2s_ivTable_S[0U];
+  uint32_t iv1 = Hacl_Hash_Blake2s_ivTable_S[1U];
+  uint32_t iv2 = Hacl_Hash_Blake2s_ivTable_S[2U];
+  uint32_t iv3 = Hacl_Hash_Blake2s_ivTable_S[3U];
+  uint32_t iv4 = Hacl_Hash_Blake2s_ivTable_S[4U];
+  uint32_t iv5 = Hacl_Hash_Blake2s_ivTable_S[5U];
+  uint32_t iv6 = Hacl_Hash_Blake2s_ivTable_S[6U];
+  uint32_t iv7 = Hacl_Hash_Blake2s_ivTable_S[7U];
+  r2[0U] = iv0;
+  r2[1U] = iv1;
+  r2[2U] = iv2;
+  r2[3U] = iv3;
+  r3[0U] = iv4;
+  r3[1U] = iv5;
+  r3[2U] = iv6;
+  r3[3U] = iv7;
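+  /* Fold the BLAKE2s parameter block (RFC 7693) into the first IV word:
+     0x01010000 encodes depth = 1 and fanout = 1, kk is the key-length byte and
+     nn the digest-length byte of the little-endian parameter word. */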
+  uint32_t kk_shift_8 = kk << 8U;
+  uint32_t iv0_ = iv0 ^ (0x01010000U ^ (kk_shift_8 ^ nn));
+  r0[0U] = iv0_;
+  r0[1U] = iv1;
+  r0[2U] = iv2;
+  r0[3U] = iv3;
+  r1[0U] = iv4;
+  r1[1U] = iv5;
+  r1[2U] = iv6;
+  r1[3U] = iv7;
+}
+
+static void update_key(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, uint32_t ll)
+{
+  uint64_t lb = (uint64_t)64U;
+  uint8_t b[64U] = { 0U };
+  memcpy(b, k, kk * sizeof (uint8_t));
+  if (ll == 0U)
+  {
+    update_block(wv, hash, true, lb, b);
+  }
+  else
+  {
+    update_block(wv, hash, false, lb, b);
+  }
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
+}
+
+void
+Hacl_Hash_Blake2s_update_multi(
+  uint32_t len,
+  uint32_t *wv,
+  uint32_t *hash,
+  uint64_t prev,
+  uint8_t *blocks,
+  uint32_t nb
+)
+{
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
+  {
+    uint64_t totlen = prev + (uint64_t)((i + 1U) * 64U);
+    uint8_t *b = blocks + i * 64U;
+    update_block(wv, hash, false, totlen, b);
+  }
+}
+
+void
+Hacl_Hash_Blake2s_update_last(
+  uint32_t len,
+  uint32_t *wv,
+  uint32_t *hash,
+  uint64_t prev,
+  uint32_t rem,
+  uint8_t *d
+)
+{
+  uint8_t b[64U] = { 0U };
+  uint8_t *last = d + len - rem;
+  memcpy(b, last, rem * sizeof (uint8_t));
+  uint64_t totlen = prev + (uint64_t)len;
+  update_block(wv, hash, true, totlen, b);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
+}
+
+static void
+update_blocks(uint32_t len, uint32_t *wv, uint32_t *hash, uint64_t prev, uint8_t *blocks)
+{
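+  /* If len is a non-zero multiple of 64, one full block is held back so that
+     update_last, which sets the finalization flag, always receives the tail. */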
+  uint32_t nb0 = len / 64U;
+  uint32_t rem0 = len % 64U;
+  uint32_t nb;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    nb = nb0 - 1U;
+  }
+  else
+  {
+    nb = nb0;
+  }
+  uint32_t rem;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    rem = 64U;
+  }
+  else
+  {
+    rem = rem0;
+  }
+  Hacl_Hash_Blake2s_update_multi(len, wv, hash, prev, blocks, nb);
+  Hacl_Hash_Blake2s_update_last(len, wv, hash, prev, rem, blocks);
+}
+
+static inline void
+update(uint32_t *wv, uint32_t *hash, uint32_t kk, uint8_t *k, uint32_t ll, uint8_t *d)
+{
+  uint64_t lb = (uint64_t)64U;
+  if (kk > 0U)
+  {
+    update_key(wv, hash, kk, k, ll);
+    if (!(ll == 0U))
+    {
+      update_blocks(ll, wv, hash, lb, d);
+      return;
+    }
+    return;
+  }
+  update_blocks(ll, wv, hash, (uint64_t)0U, d);
+}
+
+void Hacl_Hash_Blake2s_finish(uint32_t nn, uint8_t *output, uint32_t *hash)
+{
+  uint8_t b[32U] = { 0U };
+  uint8_t *first = b;
+  uint8_t *second = b + 16U;
+  uint32_t *row0 = hash;
+  uint32_t *row1 = hash + 4U;
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(first + i * 4U, row0[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(second + i * 4U, row1[i]););
+  uint8_t *final = b;
+  memcpy(output, final, nn * sizeof (uint8_t));
+  Lib_Memzero0_memzero(b, 32U, uint8_t);
+}
+
+/**
+  State allocation function when there is no key
+*/
+Hacl_Hash_Blake2s_state_t *Hacl_Hash_Blake2s_malloc(void)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
+  uint32_t *b = (uint32_t *)KRML_HOST_CALLOC(16U, sizeof (uint32_t));
+  Hacl_Hash_Blake2s_block_state_t block_state = { .fst = wv, .snd = b };
+  Hacl_Hash_Blake2s_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  Hacl_Hash_Blake2s_state_t
+  *p = (Hacl_Hash_Blake2s_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_Blake2s_state_t));
+  p[0U] = s;
+  Hacl_Hash_Blake2s_init(block_state.snd, 0U, 32U);
+  return p;
+}
+
+/**
+  Re-initialization function when there is no key
+*/
+void Hacl_Hash_Blake2s_reset(Hacl_Hash_Blake2s_state_t *state)
+{
+  Hacl_Hash_Blake2s_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state;
+  Hacl_Hash_Blake2s_init(block_state.snd, 0U, 32U);
+  Hacl_Hash_Blake2s_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
+}
+
+/**
+  Update function when there is no key; 0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Hash_Blake2s_update(Hacl_Hash_Blake2s_state_t *state, uint8_t *chunk, uint32_t chunk_len)
+{
+  Hacl_Hash_Blake2s_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
+  {
+    sz = 64U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)64U);
+  }
+  if (chunk_len <= 64U - sz)
+  {
+    Hacl_Hash_Blake2s_state_t s1 = *state;
+    Hacl_Hash_Blake2s_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_Hash_Blake2s_state_t s1 = *state;
+    Hacl_Hash_Blake2s_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      uint32_t *wv = block_state1.fst;
+      uint32_t *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2s_update_multi(64U, wv, hash, prevlen, buf, nb);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 64U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    uint32_t *wv = block_state1.fst;
+    uint32_t *hash = block_state1.snd;
+    uint32_t nb = data1_len / 64U;
+    Hacl_Hash_Blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 64U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Hash_Blake2s_state_t s1 = *state;
+    Hacl_Hash_Blake2s_block_state_t block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 64U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2
+        }
+      );
+    Hacl_Hash_Blake2s_state_t s10 = *state;
+    Hacl_Hash_Blake2s_block_state_t block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      uint32_t *wv = block_state1.fst;
+      uint32_t *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2s_update_multi(64U, wv, hash, prevlen, buf, nb);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 64U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    uint32_t *wv = block_state1.fst;
+    uint32_t *hash = block_state1.snd;
+    uint32_t nb = data1_len / 64U;
+    Hacl_Hash_Blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
+
+/**
+  Finish function when there is no key
+*/
+void Hacl_Hash_Blake2s_digest(Hacl_Hash_Blake2s_state_t *state, uint8_t *output)
+{
+  Hacl_Hash_Blake2s_state_t scrut = *state;
+  Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint32_t r;
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
+  {
+    r = 64U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)64U);
+  }
+  uint8_t *buf_1 = buf_;
+  uint32_t wv0[16U] = { 0U };
+  uint32_t b[16U] = { 0U };
+  Hacl_Hash_Blake2s_block_state_t tmp_block_state = { .fst = wv0, .snd = b };
+  uint32_t *src_b = block_state.snd;
+  uint32_t *dst_b = tmp_block_state.snd;
+  memcpy(dst_b, src_b, 16U * sizeof (uint32_t));
+  uint64_t prev_len = total_len - (uint64_t)r;
+  uint32_t ite;
+  if (r % 64U == 0U && r > 0U)
+  {
+    ite = 64U;
+  }
+  else
+  {
+    ite = r % 64U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite;
+  uint8_t *buf_multi = buf_1;
+  uint32_t *wv1 = tmp_block_state.fst;
+  uint32_t *hash0 = tmp_block_state.snd;
+  uint32_t nb = 0U;
+  Hacl_Hash_Blake2s_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb);
+  uint64_t prev_len_last = total_len - (uint64_t)r;
+  uint32_t *wv = tmp_block_state.fst;
+  uint32_t *hash = tmp_block_state.snd;
+  Hacl_Hash_Blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last);
+  Hacl_Hash_Blake2s_finish(32U, output, tmp_block_state.snd);
+}
+
+/**
+  Free state function when there is no key
+*/
+void Hacl_Hash_Blake2s_free(Hacl_Hash_Blake2s_state_t *state)
+{
+  Hacl_Hash_Blake2s_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2s_block_state_t block_state = scrut.block_state;
+  uint32_t *wv = block_state.fst;
+  uint32_t *b = block_state.snd;
+  KRML_HOST_FREE(wv);
+  KRML_HOST_FREE(b);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
+
+/**
+Write the BLAKE2s digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the to-be-generated digest with 1 <= `output_len` <= 32.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
+void
+Hacl_Hash_Blake2s_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
+)
+{
+  uint32_t b[16U] = { 0U };
+  uint32_t b1[16U] = { 0U };
+  Hacl_Hash_Blake2s_init(b, key_len, output_len);
+  update(b1, b, key_len, key, input_len, input);
+  Hacl_Hash_Blake2s_finish(output_len, output, b);
+  Lib_Memzero0_memzero(b1, 16U, uint32_t);
+  Lib_Memzero0_memzero(b, 16U, uint32_t);
+}
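+/*
+  Keyed one-shot sketch (illustrative; `key32`, `msg` and `msg_len` stand for
+  caller-provided data, the key being at most 32 bytes): a 32-byte keyed BLAKE2s
+  digest, usable as a MAC.
+
+    uint8_t tag[32U];
+    Hacl_Hash_Blake2s_hash_with_key(tag, 32U, msg, msg_len, key32, 32U);
+*/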
+
diff --git a/src/msvc/Hacl_Hash_Blake2s_128.c b/src/msvc/Hacl_Hash_Blake2s_128.c
deleted file mode 100644
index 86c4f030..00000000
--- a/src/msvc/Hacl_Hash_Blake2s_128.c
+++ /dev/null
@@ -1,491 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Hash_Blake2s_128.h"
-
-#include "internal/Hacl_Impl_Blake2_Constants.h"
-#include "internal/Hacl_Hash_Blake2.h"
-#include "lib_memzero0.h"
-
-static inline void
-blake2s_update_block(
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  bool flag,
-  uint64_t totlen,
-  uint8_t *d
-)
-{
-  uint32_t m_w[16U] = { 0U };
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    uint32_t *os = m_w;
-    uint8_t *bj = d + i * (uint32_t)4U;
-    uint32_t u = load32_le(bj);
-    uint32_t r = u;
-    uint32_t x = r;
-    os[i] = x;);
-  Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_zero;
-  uint32_t wv_14;
-  if (flag)
-  {
-    wv_14 = (uint32_t)0xFFFFFFFFU;
-  }
-  else
-  {
-    wv_14 = (uint32_t)0U;
-  }
-  uint32_t wv_15 = (uint32_t)0U;
-  mask =
-    Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)totlen,
-      (uint32_t)(totlen >> (uint32_t)32U),
-      wv_14,
-      wv_15);
-  memcpy(wv, hash, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  Lib_IntVector_Intrinsics_vec128 *wv3 = wv + (uint32_t)3U;
-  wv3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv3[0U], mask);
-  KRML_MAYBE_FOR10(i,
-    (uint32_t)0U,
-    (uint32_t)10U,
-    (uint32_t)1U,
-    uint32_t start_idx = i % (uint32_t)10U * (uint32_t)16U;
-    KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 m_st[4U] KRML_POST_ALIGN(16) = { 0U };
-    Lib_IntVector_Intrinsics_vec128 *r0 = m_st;
-    Lib_IntVector_Intrinsics_vec128 *r1 = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *r20 = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *r30 = m_st + (uint32_t)3U;
-    uint32_t s0 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)0U];
-    uint32_t s1 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)1U];
-    uint32_t s2 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)2U];
-    uint32_t s3 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)3U];
-    uint32_t s4 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)4U];
-    uint32_t s5 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)5U];
-    uint32_t s6 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)6U];
-    uint32_t s7 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)7U];
-    uint32_t s8 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)8U];
-    uint32_t s9 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)9U];
-    uint32_t s10 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)10U];
-    uint32_t s11 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)11U];
-    uint32_t s12 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)12U];
-    uint32_t s13 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)13U];
-    uint32_t s14 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)14U];
-    uint32_t s15 = Hacl_Impl_Blake2_Constants_sigmaTable[start_idx + (uint32_t)15U];
-    r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]);
-    r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]);
-    r20[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]);
-    r30[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]);
-    Lib_IntVector_Intrinsics_vec128 *x = m_st;
-    Lib_IntVector_Intrinsics_vec128 *y = m_st + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *z = m_st + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *w = m_st + (uint32_t)3U;
-    uint32_t a = (uint32_t)0U;
-    uint32_t b0 = (uint32_t)1U;
-    uint32_t c0 = (uint32_t)2U;
-    uint32_t d10 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec128 *wv_a0 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b0 = wv + b0 * (uint32_t)1U;
-    wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], wv_b0[0U]);
-    wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], x[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a1 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b1 = wv + a * (uint32_t)1U;
-    wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a1[0U], wv_b1[0U]);
-    wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a1[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a2 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b2 = wv + d10 * (uint32_t)1U;
-    wv_a2[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a2[0U], wv_b2[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a3 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b3 = wv + c0 * (uint32_t)1U;
-    wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a3[0U], wv_b3[0U]);
-    wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a3[0U], (uint32_t)12U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a4 = wv + a * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b4 = wv + b0 * (uint32_t)1U;
-    wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], wv_b4[0U]);
-    wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], y[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a5 = wv + d10 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b5 = wv + a * (uint32_t)1U;
-    wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a5[0U], wv_b5[0U]);
-    wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a5[0U], (uint32_t)8U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a6 = wv + c0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b6 = wv + d10 * (uint32_t)1U;
-    wv_a6[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a6[0U], wv_b6[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a7 = wv + b0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b7 = wv + c0 * (uint32_t)1U;
-    wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a7[0U], wv_b7[0U]);
-    wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a7[0U], (uint32_t)7U);
-    Lib_IntVector_Intrinsics_vec128 *r10 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *r21 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *r31 = wv + (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec128 v00 = r10[0U];
-    Lib_IntVector_Intrinsics_vec128
-    v1 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v00, (uint32_t)1U);
-    r10[0U] = v1;
-    Lib_IntVector_Intrinsics_vec128 v01 = r21[0U];
-    Lib_IntVector_Intrinsics_vec128
-    v10 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v01, (uint32_t)2U);
-    r21[0U] = v10;
-    Lib_IntVector_Intrinsics_vec128 v02 = r31[0U];
-    Lib_IntVector_Intrinsics_vec128
-    v11 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v02, (uint32_t)3U);
-    r31[0U] = v11;
-    uint32_t a0 = (uint32_t)0U;
-    uint32_t b = (uint32_t)1U;
-    uint32_t c = (uint32_t)2U;
-    uint32_t d1 = (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec128 *wv_a = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b8 = wv + b * (uint32_t)1U;
-    wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], wv_b8[0U]);
-    wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], z[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a8 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b9 = wv + a0 * (uint32_t)1U;
-    wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a8[0U], wv_b9[0U]);
-    wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a8[0U], (uint32_t)16U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a9 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b10 = wv + d1 * (uint32_t)1U;
-    wv_a9[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a9[0U], wv_b10[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a10 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b11 = wv + c * (uint32_t)1U;
-    wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a10[0U], wv_b11[0U]);
-    wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a10[0U], (uint32_t)12U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a11 = wv + a0 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b12 = wv + b * (uint32_t)1U;
-    wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], wv_b12[0U]);
-    wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], w[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a12 = wv + d1 * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b13 = wv + a0 * (uint32_t)1U;
-    wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a12[0U], wv_b13[0U]);
-    wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a12[0U], (uint32_t)8U);
-    Lib_IntVector_Intrinsics_vec128 *wv_a13 = wv + c * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b14 = wv + d1 * (uint32_t)1U;
-    wv_a13[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a13[0U], wv_b14[0U]);
-    Lib_IntVector_Intrinsics_vec128 *wv_a14 = wv + b * (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *wv_b = wv + c * (uint32_t)1U;
-    wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a14[0U], wv_b[0U]);
-    wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a14[0U], (uint32_t)7U);
-    Lib_IntVector_Intrinsics_vec128 *r11 = wv + (uint32_t)1U;
-    Lib_IntVector_Intrinsics_vec128 *r2 = wv + (uint32_t)2U;
-    Lib_IntVector_Intrinsics_vec128 *r3 = wv + (uint32_t)3U;
-    Lib_IntVector_Intrinsics_vec128 v0 = r11[0U];
-    Lib_IntVector_Intrinsics_vec128
-    v12 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v0, (uint32_t)3U);
-    r11[0U] = v12;
-    Lib_IntVector_Intrinsics_vec128 v03 = r2[0U];
-    Lib_IntVector_Intrinsics_vec128
-    v13 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v03, (uint32_t)2U);
-    r2[0U] = v13;
-    Lib_IntVector_Intrinsics_vec128 v04 = r3[0U];
-    Lib_IntVector_Intrinsics_vec128
-    v14 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v04, (uint32_t)1U);
-    r3[0U] = v14;);
-  Lib_IntVector_Intrinsics_vec128 *s0 = hash;
-  Lib_IntVector_Intrinsics_vec128 *s1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r0 = wv;
-  Lib_IntVector_Intrinsics_vec128 *r1 = wv + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = wv + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = wv + (uint32_t)3U;
-  s0[0U] = Lib_IntVector_Intrinsics_vec128_xor(s0[0U], r0[0U]);
-  s0[0U] = Lib_IntVector_Intrinsics_vec128_xor(s0[0U], r2[0U]);
-  s1[0U] = Lib_IntVector_Intrinsics_vec128_xor(s1[0U], r1[0U]);
-  s1[0U] = Lib_IntVector_Intrinsics_vec128_xor(s1[0U], r3[0U]);
-}
-
-void
-Hacl_Blake2s_128_blake2s_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn)
-{
-  Lib_IntVector_Intrinsics_vec128 *r0 = hash;
-  Lib_IntVector_Intrinsics_vec128 *r1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = hash + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = hash + (uint32_t)3U;
-  uint32_t iv0 = Hacl_Impl_Blake2_Constants_ivTable_S[0U];
-  uint32_t iv1 = Hacl_Impl_Blake2_Constants_ivTable_S[1U];
-  uint32_t iv2 = Hacl_Impl_Blake2_Constants_ivTable_S[2U];
-  uint32_t iv3 = Hacl_Impl_Blake2_Constants_ivTable_S[3U];
-  uint32_t iv4 = Hacl_Impl_Blake2_Constants_ivTable_S[4U];
-  uint32_t iv5 = Hacl_Impl_Blake2_Constants_ivTable_S[5U];
-  uint32_t iv6 = Hacl_Impl_Blake2_Constants_ivTable_S[6U];
-  uint32_t iv7 = Hacl_Impl_Blake2_Constants_ivTable_S[7U];
-  r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3);
-  r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
-  uint32_t kk_shift_8 = kk << (uint32_t)8U;
-  uint32_t iv0_ = iv0 ^ ((uint32_t)0x01010000U ^ (kk_shift_8 ^ nn));
-  r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1, iv2, iv3);
-  r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
-}
-
-void
-Hacl_Blake2s_128_blake2s_update_key(
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll
-)
-{
-  uint64_t lb = (uint64_t)(uint32_t)64U;
-  uint8_t b[64U] = { 0U };
-  memcpy(b, k, kk * sizeof (uint8_t));
-  if (ll == (uint32_t)0U)
-  {
-    blake2s_update_block(wv, hash, true, lb, b);
-  }
-  else
-  {
-    blake2s_update_block(wv, hash, false, lb, b);
-  }
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
-}
-
-void
-Hacl_Blake2s_128_blake2s_update_multi(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  uint64_t prev,
-  uint8_t *blocks,
-  uint32_t nb
-)
-{
-  KRML_HOST_IGNORE(len);
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
-  {
-    uint64_t totlen = prev + (uint64_t)((i + (uint32_t)1U) * (uint32_t)64U);
-    uint8_t *b = blocks + i * (uint32_t)64U;
-    blake2s_update_block(wv, hash, false, totlen, b);
-  }
-}
-
-void
-Hacl_Blake2s_128_blake2s_update_last(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  uint64_t prev,
-  uint32_t rem,
-  uint8_t *d
-)
-{
-  uint8_t b[64U] = { 0U };
-  uint8_t *last = d + len - rem;
-  memcpy(b, last, rem * sizeof (uint8_t));
-  uint64_t totlen = prev + (uint64_t)len;
-  blake2s_update_block(wv, hash, true, totlen, b);
-  Lib_Memzero0_memzero(b, (uint32_t)64U, uint8_t);
-}
-
-static inline void
-blake2s_update_blocks(
-  uint32_t len,
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  uint64_t prev,
-  uint8_t *blocks
-)
-{
-  uint32_t nb0 = len / (uint32_t)64U;
-  uint32_t rem0 = len % (uint32_t)64U;
-  K___uint32_t_uint32_t scrut;
-  if (rem0 == (uint32_t)0U && nb0 > (uint32_t)0U)
-  {
-    uint32_t nb_ = nb0 - (uint32_t)1U;
-    uint32_t rem_ = (uint32_t)64U;
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb_, .snd = rem_ });
-  }
-  else
-  {
-    scrut = ((K___uint32_t_uint32_t){ .fst = nb0, .snd = rem0 });
-  }
-  uint32_t nb = scrut.fst;
-  uint32_t rem = scrut.snd;
-  Hacl_Blake2s_128_blake2s_update_multi(len, wv, hash, prev, blocks, nb);
-  Hacl_Blake2s_128_blake2s_update_last(len, wv, hash, prev, rem, blocks);
-}
-
-static inline void
-blake2s_update(
-  Lib_IntVector_Intrinsics_vec128 *wv,
-  Lib_IntVector_Intrinsics_vec128 *hash,
-  uint32_t kk,
-  uint8_t *k,
-  uint32_t ll,
-  uint8_t *d
-)
-{
-  uint64_t lb = (uint64_t)(uint32_t)64U;
-  if (kk > (uint32_t)0U)
-  {
-    Hacl_Blake2s_128_blake2s_update_key(wv, hash, kk, k, ll);
-    if (!(ll == (uint32_t)0U))
-    {
-      blake2s_update_blocks(ll, wv, hash, lb, d);
-      return;
-    }
-    return;
-  }
-  blake2s_update_blocks(ll, wv, hash, (uint64_t)(uint32_t)0U, d);
-}
-
-void
-Hacl_Blake2s_128_blake2s_finish(
-  uint32_t nn,
-  uint8_t *output,
-  Lib_IntVector_Intrinsics_vec128 *hash
-)
-{
-  uint8_t b[32U] = { 0U };
-  uint8_t *first = b;
-  uint8_t *second = b + (uint32_t)16U;
-  Lib_IntVector_Intrinsics_vec128 *row0 = hash;
-  Lib_IntVector_Intrinsics_vec128 *row1 = hash + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128_store32_le(first, row0[0U]);
-  Lib_IntVector_Intrinsics_vec128_store32_le(second, row1[0U]);
-  uint8_t *final = b;
-  memcpy(output, final, nn * sizeof (uint8_t));
-  Lib_Memzero0_memzero(b, (uint32_t)32U, uint8_t);
-}
-
-/**
-Write the BLAKE2s digest of message `d` using key `k` into `output`.
-
-@param nn Length of to-be-generated digest with 1 <= `nn` <= 32.
-@param output Pointer to `nn` bytes of memory where the digest is written to.
-@param ll Length of the input message.
-@param d Pointer to `ll` bytes of memory where the input message is read from.
-@param kk Length of the key. Can be 0.
-@param k Pointer to `kk` bytes of memory where the key is read from.
-*/
-void
-Hacl_Blake2s_128_blake2s(
-  uint32_t nn,
-  uint8_t *output,
-  uint32_t ll,
-  uint8_t *d,
-  uint32_t kk,
-  uint8_t *k
-)
-{
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U };
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b1[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Blake2s_128_blake2s_init(b, kk, nn);
-  blake2s_update(b1, b, kk, k, ll, d);
-  Hacl_Blake2s_128_blake2s_finish(nn, output, b);
-  Lib_Memzero0_memzero(b1, (uint32_t)4U, Lib_IntVector_Intrinsics_vec128);
-  Lib_Memzero0_memzero(b, (uint32_t)4U, Lib_IntVector_Intrinsics_vec128);
-}
-
-void
-Hacl_Blake2s_128_store_state128s_to_state32(
-  uint32_t *st32,
-  Lib_IntVector_Intrinsics_vec128 *st
-)
-{
-  Lib_IntVector_Intrinsics_vec128 *r0 = st;
-  Lib_IntVector_Intrinsics_vec128 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = st + (uint32_t)3U;
-  uint32_t *b0 = st32;
-  uint32_t *b1 = st32 + (uint32_t)4U;
-  uint32_t *b2 = st32 + (uint32_t)8U;
-  uint32_t *b3 = st32 + (uint32_t)12U;
-  uint8_t b8[16U] = { 0U };
-  Lib_IntVector_Intrinsics_vec128_store32_le(b8, r0[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = b0;
-    uint8_t *bj = b8 + i * (uint32_t)4U;
-    uint32_t u = load32_le(bj);
-    uint32_t r = u;
-    uint32_t x = r;
-    os[i] = x;);
-  uint8_t b80[16U] = { 0U };
-  Lib_IntVector_Intrinsics_vec128_store32_le(b80, r1[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = b1;
-    uint8_t *bj = b80 + i * (uint32_t)4U;
-    uint32_t u = load32_le(bj);
-    uint32_t r = u;
-    uint32_t x = r;
-    os[i] = x;);
-  uint8_t b81[16U] = { 0U };
-  Lib_IntVector_Intrinsics_vec128_store32_le(b81, r2[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = b2;
-    uint8_t *bj = b81 + i * (uint32_t)4U;
-    uint32_t u = load32_le(bj);
-    uint32_t r = u;
-    uint32_t x = r;
-    os[i] = x;);
-  uint8_t b82[16U] = { 0U };
-  Lib_IntVector_Intrinsics_vec128_store32_le(b82, r3[0U]);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint32_t *os = b3;
-    uint8_t *bj = b82 + i * (uint32_t)4U;
-    uint32_t u = load32_le(bj);
-    uint32_t r = u;
-    uint32_t x = r;
-    os[i] = x;);
-}
-
-void
-Hacl_Blake2s_128_load_state128s_from_state32(
-  Lib_IntVector_Intrinsics_vec128 *st,
-  uint32_t *st32
-)
-{
-  Lib_IntVector_Intrinsics_vec128 *r0 = st;
-  Lib_IntVector_Intrinsics_vec128 *r1 = st + (uint32_t)1U;
-  Lib_IntVector_Intrinsics_vec128 *r2 = st + (uint32_t)2U;
-  Lib_IntVector_Intrinsics_vec128 *r3 = st + (uint32_t)3U;
-  uint32_t *b0 = st32;
-  uint32_t *b1 = st32 + (uint32_t)4U;
-  uint32_t *b2 = st32 + (uint32_t)8U;
-  uint32_t *b3 = st32 + (uint32_t)12U;
-  r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b0[0U], b0[1U], b0[2U], b0[3U]);
-  r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b1[0U], b1[1U], b1[2U], b1[3U]);
-  r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b2[0U], b2[1U], b2[2U], b2[3U]);
-  r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b3[0U], b3[1U], b3[2U], b3[3U]);
-}
-
-Lib_IntVector_Intrinsics_vec128 *Hacl_Blake2s_128_blake2s_malloc(void)
-{
-  Lib_IntVector_Intrinsics_vec128
-  *buf =
-    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U);
-  memset(buf, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  return buf;
-}
-
diff --git a/src/msvc/Hacl_Hash_Blake2s_Simd128.c b/src/msvc/Hacl_Hash_Blake2s_Simd128.c
new file mode 100644
index 00000000..73f0cccb
--- /dev/null
+++ b/src/msvc/Hacl_Hash_Blake2s_Simd128.c
@@ -0,0 +1,794 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_Hash_Blake2s_Simd128.h"
+
+#include "internal/Hacl_Impl_Blake2_Constants.h"
+#include "lib_memzero0.h"
+
+static inline void
+update_block(
+  Lib_IntVector_Intrinsics_vec128 *wv,
+  Lib_IntVector_Intrinsics_vec128 *hash,
+  bool flag,
+  uint64_t totlen,
+  uint8_t *d
+)
+{
+  uint32_t m_w[16U] = { 0U };
+  KRML_MAYBE_FOR16(i,
+    0U,
+    16U,
+    1U,
+    uint32_t *os = m_w;
+    uint8_t *bj = d + i * 4U;
+    uint32_t u = load32_le(bj);
+    uint32_t r = u;
+    uint32_t x = r;
+    os[i] = x;);
+  Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_zero;
+  uint32_t wv_14;
+  if (flag)
+  {
+    wv_14 = 0xFFFFFFFFU;
+  }
+  else
+  {
+    wv_14 = 0U;
+  }
+  uint32_t wv_15 = 0U;
+  mask =
+    Lib_IntVector_Intrinsics_vec128_load32s((uint32_t)totlen,
+      (uint32_t)(totlen >> 32U),
+      wv_14,
+      wv_15);
+  memcpy(wv, hash, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Lib_IntVector_Intrinsics_vec128 *wv3 = wv + 3U;
+  wv3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv3[0U], mask);
+  KRML_MAYBE_FOR10(i,
+    0U,
+    10U,
+    1U,
+    uint32_t start_idx = i % 10U * 16U;
+    KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 m_st[4U] KRML_POST_ALIGN(16) = { 0U };
+    Lib_IntVector_Intrinsics_vec128 *r0 = m_st;
+    Lib_IntVector_Intrinsics_vec128 *r1 = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec128 *r20 = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec128 *r30 = m_st + 3U;
+    uint32_t s0 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 0U];
+    uint32_t s1 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 1U];
+    uint32_t s2 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 2U];
+    uint32_t s3 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 3U];
+    uint32_t s4 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 4U];
+    uint32_t s5 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 5U];
+    uint32_t s6 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 6U];
+    uint32_t s7 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 7U];
+    uint32_t s8 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 8U];
+    uint32_t s9 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 9U];
+    uint32_t s10 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 10U];
+    uint32_t s11 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 11U];
+    uint32_t s12 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 12U];
+    uint32_t s13 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 13U];
+    uint32_t s14 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 14U];
+    uint32_t s15 = Hacl_Hash_Blake2s_sigmaTable[start_idx + 15U];
+    r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s0], m_w[s2], m_w[s4], m_w[s6]);
+    r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s1], m_w[s3], m_w[s5], m_w[s7]);
+    r20[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s8], m_w[s10], m_w[s12], m_w[s14]);
+    r30[0U] = Lib_IntVector_Intrinsics_vec128_load32s(m_w[s9], m_w[s11], m_w[s13], m_w[s15]);
+    Lib_IntVector_Intrinsics_vec128 *x = m_st;
+    Lib_IntVector_Intrinsics_vec128 *y = m_st + 1U;
+    Lib_IntVector_Intrinsics_vec128 *z = m_st + 2U;
+    Lib_IntVector_Intrinsics_vec128 *w = m_st + 3U;
+    uint32_t a = 0U;
+    uint32_t b0 = 1U;
+    uint32_t c0 = 2U;
+    uint32_t d10 = 3U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a0 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b0 = wv + b0 * 1U;
+    wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], wv_b0[0U]);
+    wv_a0[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a0[0U], x[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a1 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b1 = wv + a * 1U;
+    wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a1[0U], wv_b1[0U]);
+    wv_a1[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a1[0U], 16U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a2 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b2 = wv + d10 * 1U;
+    wv_a2[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a2[0U], wv_b2[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a3 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b3 = wv + c0 * 1U;
+    wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a3[0U], wv_b3[0U]);
+    wv_a3[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a3[0U], 12U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a4 = wv + a * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b4 = wv + b0 * 1U;
+    wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], wv_b4[0U]);
+    wv_a4[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a4[0U], y[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a5 = wv + d10 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b5 = wv + a * 1U;
+    wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a5[0U], wv_b5[0U]);
+    wv_a5[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a5[0U], 8U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a6 = wv + c0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b6 = wv + d10 * 1U;
+    wv_a6[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a6[0U], wv_b6[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a7 = wv + b0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b7 = wv + c0 * 1U;
+    wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a7[0U], wv_b7[0U]);
+    wv_a7[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a7[0U], 7U);
+    Lib_IntVector_Intrinsics_vec128 *r10 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec128 *r21 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec128 *r31 = wv + 3U;
+    Lib_IntVector_Intrinsics_vec128 v00 = r10[0U];
+    Lib_IntVector_Intrinsics_vec128
+    v1 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v00, 1U);
+    r10[0U] = v1;
+    Lib_IntVector_Intrinsics_vec128 v01 = r21[0U];
+    Lib_IntVector_Intrinsics_vec128
+    v10 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v01, 2U);
+    r21[0U] = v10;
+    Lib_IntVector_Intrinsics_vec128 v02 = r31[0U];
+    Lib_IntVector_Intrinsics_vec128
+    v11 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v02, 3U);
+    r31[0U] = v11;
+    uint32_t a0 = 0U;
+    uint32_t b = 1U;
+    uint32_t c = 2U;
+    uint32_t d1 = 3U;
+    Lib_IntVector_Intrinsics_vec128 *wv_a = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b8 = wv + b * 1U;
+    wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], wv_b8[0U]);
+    wv_a[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a[0U], z[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a8 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b9 = wv + a0 * 1U;
+    wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a8[0U], wv_b9[0U]);
+    wv_a8[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a8[0U], 16U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a9 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b10 = wv + d1 * 1U;
+    wv_a9[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a9[0U], wv_b10[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a10 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b11 = wv + c * 1U;
+    wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a10[0U], wv_b11[0U]);
+    wv_a10[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a10[0U], 12U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a11 = wv + a0 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b12 = wv + b * 1U;
+    wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], wv_b12[0U]);
+    wv_a11[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a11[0U], w[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a12 = wv + d1 * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b13 = wv + a0 * 1U;
+    wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a12[0U], wv_b13[0U]);
+    wv_a12[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a12[0U], 8U);
+    Lib_IntVector_Intrinsics_vec128 *wv_a13 = wv + c * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b14 = wv + d1 * 1U;
+    wv_a13[0U] = Lib_IntVector_Intrinsics_vec128_add32(wv_a13[0U], wv_b14[0U]);
+    Lib_IntVector_Intrinsics_vec128 *wv_a14 = wv + b * 1U;
+    Lib_IntVector_Intrinsics_vec128 *wv_b = wv + c * 1U;
+    wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_xor(wv_a14[0U], wv_b[0U]);
+    wv_a14[0U] = Lib_IntVector_Intrinsics_vec128_rotate_right32(wv_a14[0U], 7U);
+    Lib_IntVector_Intrinsics_vec128 *r11 = wv + 1U;
+    Lib_IntVector_Intrinsics_vec128 *r2 = wv + 2U;
+    Lib_IntVector_Intrinsics_vec128 *r3 = wv + 3U;
+    Lib_IntVector_Intrinsics_vec128 v0 = r11[0U];
+    Lib_IntVector_Intrinsics_vec128
+    v12 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v0, 3U);
+    r11[0U] = v12;
+    Lib_IntVector_Intrinsics_vec128 v03 = r2[0U];
+    Lib_IntVector_Intrinsics_vec128
+    v13 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v03, 2U);
+    r2[0U] = v13;
+    Lib_IntVector_Intrinsics_vec128 v04 = r3[0U];
+    Lib_IntVector_Intrinsics_vec128
+    v14 = Lib_IntVector_Intrinsics_vec128_rotate_right_lanes32(v04, 1U);
+    r3[0U] = v14;);
+  Lib_IntVector_Intrinsics_vec128 *s0 = hash;
+  Lib_IntVector_Intrinsics_vec128 *s1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r0 = wv;
+  Lib_IntVector_Intrinsics_vec128 *r1 = wv + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = wv + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = wv + 3U;
+  s0[0U] = Lib_IntVector_Intrinsics_vec128_xor(s0[0U], r0[0U]);
+  s0[0U] = Lib_IntVector_Intrinsics_vec128_xor(s0[0U], r2[0U]);
+  s1[0U] = Lib_IntVector_Intrinsics_vec128_xor(s1[0U], r1[0U]);
+  s1[0U] = Lib_IntVector_Intrinsics_vec128_xor(s1[0U], r3[0U]);
+}
+
+void
+Hacl_Hash_Blake2s_Simd128_init(Lib_IntVector_Intrinsics_vec128 *hash, uint32_t kk, uint32_t nn)
+{
+  Lib_IntVector_Intrinsics_vec128 *r0 = hash;
+  Lib_IntVector_Intrinsics_vec128 *r1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = hash + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = hash + 3U;
+  uint32_t iv0 = Hacl_Hash_Blake2s_ivTable_S[0U];
+  uint32_t iv1 = Hacl_Hash_Blake2s_ivTable_S[1U];
+  uint32_t iv2 = Hacl_Hash_Blake2s_ivTable_S[2U];
+  uint32_t iv3 = Hacl_Hash_Blake2s_ivTable_S[3U];
+  uint32_t iv4 = Hacl_Hash_Blake2s_ivTable_S[4U];
+  uint32_t iv5 = Hacl_Hash_Blake2s_ivTable_S[5U];
+  uint32_t iv6 = Hacl_Hash_Blake2s_ivTable_S[6U];
+  uint32_t iv7 = Hacl_Hash_Blake2s_ivTable_S[7U];
+  r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0, iv1, iv2, iv3);
+  r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
+  uint32_t kk_shift_8 = kk << 8U;
+  uint32_t iv0_ = iv0 ^ (0x01010000U ^ (kk_shift_8 ^ nn));
+  r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv0_, iv1, iv2, iv3);
+  r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(iv4, iv5, iv6, iv7);
+}
+
+static void
+update_key(
+  Lib_IntVector_Intrinsics_vec128 *wv,
+  Lib_IntVector_Intrinsics_vec128 *hash,
+  uint32_t kk,
+  uint8_t *k,
+  uint32_t ll
+)
+{
+  uint64_t lb = (uint64_t)64U;
+  uint8_t b[64U] = { 0U };
+  memcpy(b, k, kk * sizeof (uint8_t));
+  if (ll == 0U)
+  {
+    update_block(wv, hash, true, lb, b);
+  }
+  else
+  {
+    update_block(wv, hash, false, lb, b);
+  }
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
+}
+
+void
+Hacl_Hash_Blake2s_Simd128_update_multi(
+  uint32_t len,
+  Lib_IntVector_Intrinsics_vec128 *wv,
+  Lib_IntVector_Intrinsics_vec128 *hash,
+  uint64_t prev,
+  uint8_t *blocks,
+  uint32_t nb
+)
+{
+  KRML_MAYBE_UNUSED_VAR(len);
+  for (uint32_t i = 0U; i < nb; i++)
+  {
+    uint64_t totlen = prev + (uint64_t)((i + 1U) * 64U);
+    uint8_t *b = blocks + i * 64U;
+    update_block(wv, hash, false, totlen, b);
+  }
+}
+
+void
+Hacl_Hash_Blake2s_Simd128_update_last(
+  uint32_t len,
+  Lib_IntVector_Intrinsics_vec128 *wv,
+  Lib_IntVector_Intrinsics_vec128 *hash,
+  uint64_t prev,
+  uint32_t rem,
+  uint8_t *d
+)
+{
+  uint8_t b[64U] = { 0U };
+  uint8_t *last = d + len - rem;
+  memcpy(b, last, rem * sizeof (uint8_t));
+  uint64_t totlen = prev + (uint64_t)len;
+  update_block(wv, hash, true, totlen, b);
+  Lib_Memzero0_memzero(b, 64U, uint8_t);
+}
+
+static inline void
+update_blocks(
+  uint32_t len,
+  Lib_IntVector_Intrinsics_vec128 *wv,
+  Lib_IntVector_Intrinsics_vec128 *hash,
+  uint64_t prev,
+  uint8_t *blocks
+)
+{
+  uint32_t nb0 = len / 64U;
+  uint32_t rem0 = len % 64U;
+  uint32_t nb;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    nb = nb0 - 1U;
+  }
+  else
+  {
+    nb = nb0;
+  }
+  uint32_t rem;
+  if (rem0 == 0U && nb0 > 0U)
+  {
+    rem = 64U;
+  }
+  else
+  {
+    rem = rem0;
+  }
+  Hacl_Hash_Blake2s_Simd128_update_multi(len, wv, hash, prev, blocks, nb);
+  Hacl_Hash_Blake2s_Simd128_update_last(len, wv, hash, prev, rem, blocks);
+}
+
+static inline void
+update(
+  Lib_IntVector_Intrinsics_vec128 *wv,
+  Lib_IntVector_Intrinsics_vec128 *hash,
+  uint32_t kk,
+  uint8_t *k,
+  uint32_t ll,
+  uint8_t *d
+)
+{
+  uint64_t lb = (uint64_t)64U;
+  if (kk > 0U)
+  {
+    update_key(wv, hash, kk, k, ll);
+    if (!(ll == 0U))
+    {
+      update_blocks(ll, wv, hash, lb, d);
+      return;
+    }
+    return;
+  }
+  update_blocks(ll, wv, hash, (uint64_t)0U, d);
+}
+
+void
+Hacl_Hash_Blake2s_Simd128_finish(
+  uint32_t nn,
+  uint8_t *output,
+  Lib_IntVector_Intrinsics_vec128 *hash
+)
+{
+  uint8_t b[32U] = { 0U };
+  uint8_t *first = b;
+  uint8_t *second = b + 16U;
+  Lib_IntVector_Intrinsics_vec128 *row0 = hash;
+  Lib_IntVector_Intrinsics_vec128 *row1 = hash + 1U;
+  Lib_IntVector_Intrinsics_vec128_store32_le(first, row0[0U]);
+  Lib_IntVector_Intrinsics_vec128_store32_le(second, row1[0U]);
+  uint8_t *final = b;
+  memcpy(output, final, nn * sizeof (uint8_t));
+  Lib_Memzero0_memzero(b, 32U, uint8_t);
+}
+
+void
+Hacl_Hash_Blake2s_Simd128_store_state128s_to_state32(
+  uint32_t *st32,
+  Lib_IntVector_Intrinsics_vec128 *st
+)
+{
+  Lib_IntVector_Intrinsics_vec128 *r0 = st;
+  Lib_IntVector_Intrinsics_vec128 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = st + 3U;
+  uint32_t *b0 = st32;
+  uint32_t *b1 = st32 + 4U;
+  uint32_t *b2 = st32 + 8U;
+  uint32_t *b3 = st32 + 12U;
+  uint8_t b8[16U] = { 0U };
+  Lib_IntVector_Intrinsics_vec128_store32_le(b8, r0[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = b0;
+    uint8_t *bj = b8 + i * 4U;
+    uint32_t u = load32_le(bj);
+    uint32_t r = u;
+    uint32_t x = r;
+    os[i] = x;);
+  uint8_t b80[16U] = { 0U };
+  Lib_IntVector_Intrinsics_vec128_store32_le(b80, r1[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = b1;
+    uint8_t *bj = b80 + i * 4U;
+    uint32_t u = load32_le(bj);
+    uint32_t r = u;
+    uint32_t x = r;
+    os[i] = x;);
+  uint8_t b81[16U] = { 0U };
+  Lib_IntVector_Intrinsics_vec128_store32_le(b81, r2[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = b2;
+    uint8_t *bj = b81 + i * 4U;
+    uint32_t u = load32_le(bj);
+    uint32_t r = u;
+    uint32_t x = r;
+    os[i] = x;);
+  uint8_t b82[16U] = { 0U };
+  Lib_IntVector_Intrinsics_vec128_store32_le(b82, r3[0U]);
+  KRML_MAYBE_FOR4(i,
+    0U,
+    4U,
+    1U,
+    uint32_t *os = b3;
+    uint8_t *bj = b82 + i * 4U;
+    uint32_t u = load32_le(bj);
+    uint32_t r = u;
+    uint32_t x = r;
+    os[i] = x;);
+}
+
+void
+Hacl_Hash_Blake2s_Simd128_load_state128s_from_state32(
+  Lib_IntVector_Intrinsics_vec128 *st,
+  uint32_t *st32
+)
+{
+  Lib_IntVector_Intrinsics_vec128 *r0 = st;
+  Lib_IntVector_Intrinsics_vec128 *r1 = st + 1U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = st + 2U;
+  Lib_IntVector_Intrinsics_vec128 *r3 = st + 3U;
+  uint32_t *b0 = st32;
+  uint32_t *b1 = st32 + 4U;
+  uint32_t *b2 = st32 + 8U;
+  uint32_t *b3 = st32 + 12U;
+  r0[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b0[0U], b0[1U], b0[2U], b0[3U]);
+  r1[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b1[0U], b1[1U], b1[2U], b1[3U]);
+  r2[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b2[0U], b2[1U], b2[2U], b2[3U]);
+  r3[0U] = Lib_IntVector_Intrinsics_vec128_load32s(b3[0U], b3[1U], b3[2U], b3[3U]);
+}
+
+Lib_IntVector_Intrinsics_vec128 *Hacl_Hash_Blake2s_Simd128_malloc_with_key(void)
+{
+  Lib_IntVector_Intrinsics_vec128
+  *buf =
+    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(buf, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  return buf;
+}
+
+/**
+  State allocation function when there is no key
+*/
+Hacl_Hash_Blake2s_Simd128_state_t *Hacl_Hash_Blake2s_Simd128_malloc(void)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  Lib_IntVector_Intrinsics_vec128
+  *wv =
+    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(wv, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Lib_IntVector_Intrinsics_vec128
+  *b =
+    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 4U);
+  memset(b, 0U, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Hacl_Hash_Blake2s_Simd128_block_state_t block_state = { .fst = wv, .snd = b };
+  Hacl_Hash_Blake2s_Simd128_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  Hacl_Hash_Blake2s_Simd128_state_t
+  *p =
+    (Hacl_Hash_Blake2s_Simd128_state_t *)KRML_HOST_MALLOC(sizeof (
+        Hacl_Hash_Blake2s_Simd128_state_t
+      ));
+  p[0U] = s;
+  Hacl_Hash_Blake2s_Simd128_init(block_state.snd, 0U, 32U);
+  return p;
+}
+
+/**
+  Re-initialization function when there is no key
+*/
+void Hacl_Hash_Blake2s_Simd128_reset(Hacl_Hash_Blake2s_Simd128_state_t *state)
+{
+  Hacl_Hash_Blake2s_Simd128_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state;
+  Hacl_Hash_Blake2s_Simd128_init(block_state.snd, 0U, 32U);
+  Hacl_Hash_Blake2s_Simd128_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
+}
+
+/**
+  Update function when there is no key; 0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_Hash_Blake2s_Simd128_update(
+  Hacl_Hash_Blake2s_Simd128_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
+)
+{
+  Hacl_Hash_Blake2s_Simd128_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
+  {
+    sz = 64U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)64U);
+  }
+  if (chunk_len <= 64U - sz)
+  {
+    Hacl_Hash_Blake2s_Simd128_state_t s1 = *state;
+    Hacl_Hash_Blake2s_Simd128_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_Simd128_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_Hash_Blake2s_Simd128_state_t s1 = *state;
+    Hacl_Hash_Blake2s_Simd128_block_state_t block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
+      Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, hash, prevlen, buf, nb);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 64U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
+    Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
+    uint32_t nb = data1_len / 64U;
+    Hacl_Hash_Blake2s_Simd128_update_multi(data1_len, wv, hash, total_len1, data1, nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_Simd128_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 64U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Hash_Blake2s_Simd128_state_t s1 = *state;
+    Hacl_Hash_Blake2s_Simd128_block_state_t block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 64U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_Simd128_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2
+        }
+      );
+    Hacl_Hash_Blake2s_Simd128_state_t s10 = *state;
+    Hacl_Hash_Blake2s_Simd128_block_state_t block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    if (!(sz1 == 0U))
+    {
+      uint64_t prevlen = total_len1 - (uint64_t)sz1;
+      Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
+      Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
+      uint32_t nb = 1U;
+      Hacl_Hash_Blake2s_Simd128_update_multi(64U, wv, hash, prevlen, buf, nb);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 64U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
+    Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
+    uint32_t nb = data1_len / 64U;
+    Hacl_Hash_Blake2s_Simd128_update_multi(data1_len, wv, hash, total_len1, data1, nb);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_Hash_Blake2s_Simd128_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
+
+/**
+  Finish function when there is no key
+*/
+void
+Hacl_Hash_Blake2s_Simd128_digest(Hacl_Hash_Blake2s_Simd128_state_t *state, uint8_t *output)
+{
+  Hacl_Hash_Blake2s_Simd128_state_t scrut = *state;
+  Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint32_t r;
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
+  {
+    r = 64U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)64U);
+  }
+  uint8_t *buf_1 = buf_;
+  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
+  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U };
+  Hacl_Hash_Blake2s_Simd128_block_state_t tmp_block_state = { .fst = wv0, .snd = b };
+  Lib_IntVector_Intrinsics_vec128 *src_b = block_state.snd;
+  Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.snd;
+  memcpy(dst_b, src_b, 4U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  uint64_t prev_len = total_len - (uint64_t)r;
+  uint32_t ite;
+  if (r % 64U == 0U && r > 0U)
+  {
+    ite = 64U;
+  }
+  else
+  {
+    ite = r % 64U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite;
+  uint8_t *buf_multi = buf_1;
+  Lib_IntVector_Intrinsics_vec128 *wv1 = tmp_block_state.fst;
+  Lib_IntVector_Intrinsics_vec128 *hash0 = tmp_block_state.snd;
+  uint32_t nb = 0U;
+  Hacl_Hash_Blake2s_Simd128_update_multi(0U, wv1, hash0, prev_len, buf_multi, nb);
+  uint64_t prev_len_last = total_len - (uint64_t)r;
+  Lib_IntVector_Intrinsics_vec128 *wv = tmp_block_state.fst;
+  Lib_IntVector_Intrinsics_vec128 *hash = tmp_block_state.snd;
+  Hacl_Hash_Blake2s_Simd128_update_last(r, wv, hash, prev_len_last, r, buf_last);
+  Hacl_Hash_Blake2s_Simd128_finish(32U, output, tmp_block_state.snd);
+}
+
+/**
+  Free state function when there is no key
+*/
+void Hacl_Hash_Blake2s_Simd128_free(Hacl_Hash_Blake2s_Simd128_state_t *state)
+{
+  Hacl_Hash_Blake2s_Simd128_state_t scrut = *state;
+  uint8_t *buf = scrut.buf;
+  Hacl_Hash_Blake2s_Simd128_block_state_t block_state = scrut.block_state;
+  Lib_IntVector_Intrinsics_vec128 *wv = block_state.fst;
+  Lib_IntVector_Intrinsics_vec128 *b = block_state.snd;
+  KRML_ALIGNED_FREE(wv);
+  KRML_ALIGNED_FREE(b);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
+
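+/*
+  Usage sketch (editor's addition, not part of the generated code): a minimal,
+  hypothetical example of the streaming API above (malloc, update, digest,
+  free), assuming "Hacl_Hash_Blake2s_Simd128.h" is included and the default
+  32-byte digest length. The name `example_stream_digest` is illustrative
+  only and is not part of this library.
+
+    void example_stream_digest(uint8_t *msg, uint32_t msg_len, uint8_t out[32U])
+    {
+      Hacl_Hash_Blake2s_Simd128_state_t *st = Hacl_Hash_Blake2s_Simd128_malloc();
+      // update returns MaximumLengthExceeded only once the total input would
+      // exceed 2^64 - 1 bytes; otherwise it buffers/compresses and returns Success.
+      if (Hacl_Hash_Blake2s_Simd128_update(st, msg, msg_len) == Hacl_Streaming_Types_Success)
+      {
+        Hacl_Hash_Blake2s_Simd128_digest(st, out);
+      }
+      Hacl_Hash_Blake2s_Simd128_free(st);
+    }
+*/
+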
+/**
+Write the BLAKE2s digest of message `input` using key `key` into `output`.
+
+@param output Pointer to `output_len` bytes of memory where the digest is written to.
+@param output_len Length of the digest to be generated; must satisfy 1 <= `output_len` <= 32.
+@param input Pointer to `input_len` bytes of memory where the input message is read from.
+@param input_len Length of the input message.
+@param key Pointer to `key_len` bytes of memory where the key is read from.
+@param key_len Length of the key. Can be 0.
+*/
+void
+Hacl_Hash_Blake2s_Simd128_hash_with_key(
+  uint8_t *output,
+  uint32_t output_len,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key,
+  uint32_t key_len
+)
+{
+  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U };
+  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b1[4U] KRML_POST_ALIGN(16) = { 0U };
+  Hacl_Hash_Blake2s_Simd128_init(b, key_len, output_len);
+  update(b1, b, key_len, key, input_len, input);
+  Hacl_Hash_Blake2s_Simd128_finish(output_len, output, b);
+  Lib_Memzero0_memzero(b1, 4U, Lib_IntVector_Intrinsics_vec128);
+  Lib_Memzero0_memzero(b, 4U, Lib_IntVector_Intrinsics_vec128);
+}
+
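+/*
+  Usage sketch (editor's addition, not part of the generated code): a minimal,
+  hypothetical call to the one-shot `Hacl_Hash_Blake2s_Simd128_hash_with_key`
+  above for an unkeyed 32-byte digest; `msg` and `msg_len` are assumed to be
+  supplied by the caller. With `key_len` = 0 the key pointer is never read,
+  so NULL is passed here.
+
+    uint8_t digest[32U];
+    Hacl_Hash_Blake2s_Simd128_hash_with_key(digest, 32U, msg, msg_len, NULL, 0U);
+*/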
diff --git a/src/msvc/Hacl_Hash_MD5.c b/src/msvc/Hacl_Hash_MD5.c
index 222ac824..ed294839 100644
--- a/src/msvc/Hacl_Hash_MD5.c
+++ b/src/msvc/Hacl_Hash_MD5.c
@@ -25,37 +25,29 @@
 
 #include "internal/Hacl_Hash_MD5.h"
 
-static uint32_t
-_h0[4U] =
-  { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U };
+static uint32_t _h0[4U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U };
 
 static uint32_t
 _t[64U] =
   {
-    (uint32_t)0xd76aa478U, (uint32_t)0xe8c7b756U, (uint32_t)0x242070dbU, (uint32_t)0xc1bdceeeU,
-    (uint32_t)0xf57c0fafU, (uint32_t)0x4787c62aU, (uint32_t)0xa8304613U, (uint32_t)0xfd469501U,
-    (uint32_t)0x698098d8U, (uint32_t)0x8b44f7afU, (uint32_t)0xffff5bb1U, (uint32_t)0x895cd7beU,
-    (uint32_t)0x6b901122U, (uint32_t)0xfd987193U, (uint32_t)0xa679438eU, (uint32_t)0x49b40821U,
-    (uint32_t)0xf61e2562U, (uint32_t)0xc040b340U, (uint32_t)0x265e5a51U, (uint32_t)0xe9b6c7aaU,
-    (uint32_t)0xd62f105dU, (uint32_t)0x02441453U, (uint32_t)0xd8a1e681U, (uint32_t)0xe7d3fbc8U,
-    (uint32_t)0x21e1cde6U, (uint32_t)0xc33707d6U, (uint32_t)0xf4d50d87U, (uint32_t)0x455a14edU,
-    (uint32_t)0xa9e3e905U, (uint32_t)0xfcefa3f8U, (uint32_t)0x676f02d9U, (uint32_t)0x8d2a4c8aU,
-    (uint32_t)0xfffa3942U, (uint32_t)0x8771f681U, (uint32_t)0x6d9d6122U, (uint32_t)0xfde5380cU,
-    (uint32_t)0xa4beea44U, (uint32_t)0x4bdecfa9U, (uint32_t)0xf6bb4b60U, (uint32_t)0xbebfbc70U,
-    (uint32_t)0x289b7ec6U, (uint32_t)0xeaa127faU, (uint32_t)0xd4ef3085U, (uint32_t)0x4881d05U,
-    (uint32_t)0xd9d4d039U, (uint32_t)0xe6db99e5U, (uint32_t)0x1fa27cf8U, (uint32_t)0xc4ac5665U,
-    (uint32_t)0xf4292244U, (uint32_t)0x432aff97U, (uint32_t)0xab9423a7U, (uint32_t)0xfc93a039U,
-    (uint32_t)0x655b59c3U, (uint32_t)0x8f0ccc92U, (uint32_t)0xffeff47dU, (uint32_t)0x85845dd1U,
-    (uint32_t)0x6fa87e4fU, (uint32_t)0xfe2ce6e0U, (uint32_t)0xa3014314U, (uint32_t)0x4e0811a1U,
-    (uint32_t)0xf7537e82U, (uint32_t)0xbd3af235U, (uint32_t)0x2ad7d2bbU, (uint32_t)0xeb86d391U
+    0xd76aa478U, 0xe8c7b756U, 0x242070dbU, 0xc1bdceeeU, 0xf57c0fafU, 0x4787c62aU, 0xa8304613U,
+    0xfd469501U, 0x698098d8U, 0x8b44f7afU, 0xffff5bb1U, 0x895cd7beU, 0x6b901122U, 0xfd987193U,
+    0xa679438eU, 0x49b40821U, 0xf61e2562U, 0xc040b340U, 0x265e5a51U, 0xe9b6c7aaU, 0xd62f105dU,
+    0x02441453U, 0xd8a1e681U, 0xe7d3fbc8U, 0x21e1cde6U, 0xc33707d6U, 0xf4d50d87U, 0x455a14edU,
+    0xa9e3e905U, 0xfcefa3f8U, 0x676f02d9U, 0x8d2a4c8aU, 0xfffa3942U, 0x8771f681U, 0x6d9d6122U,
+    0xfde5380cU, 0xa4beea44U, 0x4bdecfa9U, 0xf6bb4b60U, 0xbebfbc70U, 0x289b7ec6U, 0xeaa127faU,
+    0xd4ef3085U, 0x4881d05U, 0xd9d4d039U, 0xe6db99e5U, 0x1fa27cf8U, 0xc4ac5665U, 0xf4292244U,
+    0x432aff97U, 0xab9423a7U, 0xfc93a039U, 0x655b59c3U, 0x8f0ccc92U, 0xffeff47dU, 0x85845dd1U,
+    0x6fa87e4fU, 0xfe2ce6e0U, 0xa3014314U, 0x4e0811a1U, 0xf7537e82U, 0xbd3af235U, 0x2ad7d2bbU,
+    0xeb86d391U
   };
 
-void Hacl_Hash_Core_MD5_legacy_init(uint32_t *s)
+void Hacl_Hash_MD5_init(uint32_t *s)
 {
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, s[i] = _h0[i];);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, s[i] = _h0[i];);
 }
 
-static void legacy_update(uint32_t *abcd, uint8_t *x)
+static void update(uint32_t *abcd, uint8_t *x)
 {
   uint32_t aa = abcd[0U];
   uint32_t bb = abcd[1U];
@@ -74,14 +66,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb0
     +
       ((va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0)
-      << (uint32_t)7U
-      | (va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0) >> (uint32_t)25U);
+      << 7U
+      | (va + ((vb0 & vc0) | (~vb0 & vd0)) + xk + ti0) >> 25U);
   abcd[0U] = v;
   uint32_t va0 = abcd[3U];
   uint32_t vb1 = abcd[0U];
   uint32_t vc1 = abcd[1U];
   uint32_t vd1 = abcd[2U];
-  uint8_t *b1 = x + (uint32_t)4U;
+  uint8_t *b1 = x + 4U;
   uint32_t u0 = load32_le(b1);
   uint32_t xk0 = u0;
   uint32_t ti1 = _t[1U];
@@ -90,14 +82,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb1
     +
       ((va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1)
-      << (uint32_t)12U
-      | (va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1) >> (uint32_t)20U);
+      << 12U
+      | (va0 + ((vb1 & vc1) | (~vb1 & vd1)) + xk0 + ti1) >> 20U);
   abcd[3U] = v0;
   uint32_t va1 = abcd[2U];
   uint32_t vb2 = abcd[3U];
   uint32_t vc2 = abcd[0U];
   uint32_t vd2 = abcd[1U];
-  uint8_t *b2 = x + (uint32_t)8U;
+  uint8_t *b2 = x + 8U;
   uint32_t u1 = load32_le(b2);
   uint32_t xk1 = u1;
   uint32_t ti2 = _t[2U];
@@ -106,14 +98,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb2
     +
       ((va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2)
-      << (uint32_t)17U
-      | (va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2) >> (uint32_t)15U);
+      << 17U
+      | (va1 + ((vb2 & vc2) | (~vb2 & vd2)) + xk1 + ti2) >> 15U);
   abcd[2U] = v1;
   uint32_t va2 = abcd[1U];
   uint32_t vb3 = abcd[2U];
   uint32_t vc3 = abcd[3U];
   uint32_t vd3 = abcd[0U];
-  uint8_t *b3 = x + (uint32_t)12U;
+  uint8_t *b3 = x + 12U;
   uint32_t u2 = load32_le(b3);
   uint32_t xk2 = u2;
   uint32_t ti3 = _t[3U];
@@ -122,14 +114,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb3
     +
       ((va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3)
-      << (uint32_t)22U
-      | (va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3) >> (uint32_t)10U);
+      << 22U
+      | (va2 + ((vb3 & vc3) | (~vb3 & vd3)) + xk2 + ti3) >> 10U);
   abcd[1U] = v2;
   uint32_t va3 = abcd[0U];
   uint32_t vb4 = abcd[1U];
   uint32_t vc4 = abcd[2U];
   uint32_t vd4 = abcd[3U];
-  uint8_t *b4 = x + (uint32_t)16U;
+  uint8_t *b4 = x + 16U;
   uint32_t u3 = load32_le(b4);
   uint32_t xk3 = u3;
   uint32_t ti4 = _t[4U];
@@ -138,14 +130,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb4
     +
       ((va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4)
-      << (uint32_t)7U
-      | (va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4) >> (uint32_t)25U);
+      << 7U
+      | (va3 + ((vb4 & vc4) | (~vb4 & vd4)) + xk3 + ti4) >> 25U);
   abcd[0U] = v3;
   uint32_t va4 = abcd[3U];
   uint32_t vb5 = abcd[0U];
   uint32_t vc5 = abcd[1U];
   uint32_t vd5 = abcd[2U];
-  uint8_t *b5 = x + (uint32_t)20U;
+  uint8_t *b5 = x + 20U;
   uint32_t u4 = load32_le(b5);
   uint32_t xk4 = u4;
   uint32_t ti5 = _t[5U];
@@ -154,14 +146,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb5
     +
       ((va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5)
-      << (uint32_t)12U
-      | (va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5) >> (uint32_t)20U);
+      << 12U
+      | (va4 + ((vb5 & vc5) | (~vb5 & vd5)) + xk4 + ti5) >> 20U);
   abcd[3U] = v4;
   uint32_t va5 = abcd[2U];
   uint32_t vb6 = abcd[3U];
   uint32_t vc6 = abcd[0U];
   uint32_t vd6 = abcd[1U];
-  uint8_t *b6 = x + (uint32_t)24U;
+  uint8_t *b6 = x + 24U;
   uint32_t u5 = load32_le(b6);
   uint32_t xk5 = u5;
   uint32_t ti6 = _t[6U];
@@ -170,14 +162,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb6
     +
       ((va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6)
-      << (uint32_t)17U
-      | (va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6) >> (uint32_t)15U);
+      << 17U
+      | (va5 + ((vb6 & vc6) | (~vb6 & vd6)) + xk5 + ti6) >> 15U);
   abcd[2U] = v5;
   uint32_t va6 = abcd[1U];
   uint32_t vb7 = abcd[2U];
   uint32_t vc7 = abcd[3U];
   uint32_t vd7 = abcd[0U];
-  uint8_t *b7 = x + (uint32_t)28U;
+  uint8_t *b7 = x + 28U;
   uint32_t u6 = load32_le(b7);
   uint32_t xk6 = u6;
   uint32_t ti7 = _t[7U];
@@ -186,14 +178,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb7
     +
       ((va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7)
-      << (uint32_t)22U
-      | (va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7) >> (uint32_t)10U);
+      << 22U
+      | (va6 + ((vb7 & vc7) | (~vb7 & vd7)) + xk6 + ti7) >> 10U);
   abcd[1U] = v6;
   uint32_t va7 = abcd[0U];
   uint32_t vb8 = abcd[1U];
   uint32_t vc8 = abcd[2U];
   uint32_t vd8 = abcd[3U];
-  uint8_t *b8 = x + (uint32_t)32U;
+  uint8_t *b8 = x + 32U;
   uint32_t u7 = load32_le(b8);
   uint32_t xk7 = u7;
   uint32_t ti8 = _t[8U];
@@ -202,14 +194,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb8
     +
       ((va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8)
-      << (uint32_t)7U
-      | (va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8) >> (uint32_t)25U);
+      << 7U
+      | (va7 + ((vb8 & vc8) | (~vb8 & vd8)) + xk7 + ti8) >> 25U);
   abcd[0U] = v7;
   uint32_t va8 = abcd[3U];
   uint32_t vb9 = abcd[0U];
   uint32_t vc9 = abcd[1U];
   uint32_t vd9 = abcd[2U];
-  uint8_t *b9 = x + (uint32_t)36U;
+  uint8_t *b9 = x + 36U;
   uint32_t u8 = load32_le(b9);
   uint32_t xk8 = u8;
   uint32_t ti9 = _t[9U];
@@ -218,14 +210,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb9
     +
       ((va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9)
-      << (uint32_t)12U
-      | (va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9) >> (uint32_t)20U);
+      << 12U
+      | (va8 + ((vb9 & vc9) | (~vb9 & vd9)) + xk8 + ti9) >> 20U);
   abcd[3U] = v8;
   uint32_t va9 = abcd[2U];
   uint32_t vb10 = abcd[3U];
   uint32_t vc10 = abcd[0U];
   uint32_t vd10 = abcd[1U];
-  uint8_t *b10 = x + (uint32_t)40U;
+  uint8_t *b10 = x + 40U;
   uint32_t u9 = load32_le(b10);
   uint32_t xk9 = u9;
   uint32_t ti10 = _t[10U];
@@ -234,14 +226,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb10
     +
       ((va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10)
-      << (uint32_t)17U
-      | (va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10) >> (uint32_t)15U);
+      << 17U
+      | (va9 + ((vb10 & vc10) | (~vb10 & vd10)) + xk9 + ti10) >> 15U);
   abcd[2U] = v9;
   uint32_t va10 = abcd[1U];
   uint32_t vb11 = abcd[2U];
   uint32_t vc11 = abcd[3U];
   uint32_t vd11 = abcd[0U];
-  uint8_t *b11 = x + (uint32_t)44U;
+  uint8_t *b11 = x + 44U;
   uint32_t u10 = load32_le(b11);
   uint32_t xk10 = u10;
   uint32_t ti11 = _t[11U];
@@ -250,14 +242,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb11
     +
       ((va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11)
-      << (uint32_t)22U
-      | (va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11) >> (uint32_t)10U);
+      << 22U
+      | (va10 + ((vb11 & vc11) | (~vb11 & vd11)) + xk10 + ti11) >> 10U);
   abcd[1U] = v10;
   uint32_t va11 = abcd[0U];
   uint32_t vb12 = abcd[1U];
   uint32_t vc12 = abcd[2U];
   uint32_t vd12 = abcd[3U];
-  uint8_t *b12 = x + (uint32_t)48U;
+  uint8_t *b12 = x + 48U;
   uint32_t u11 = load32_le(b12);
   uint32_t xk11 = u11;
   uint32_t ti12 = _t[12U];
@@ -266,14 +258,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb12
     +
       ((va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12)
-      << (uint32_t)7U
-      | (va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12) >> (uint32_t)25U);
+      << 7U
+      | (va11 + ((vb12 & vc12) | (~vb12 & vd12)) + xk11 + ti12) >> 25U);
   abcd[0U] = v11;
   uint32_t va12 = abcd[3U];
   uint32_t vb13 = abcd[0U];
   uint32_t vc13 = abcd[1U];
   uint32_t vd13 = abcd[2U];
-  uint8_t *b13 = x + (uint32_t)52U;
+  uint8_t *b13 = x + 52U;
   uint32_t u12 = load32_le(b13);
   uint32_t xk12 = u12;
   uint32_t ti13 = _t[13U];
@@ -282,14 +274,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb13
     +
       ((va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13)
-      << (uint32_t)12U
-      | (va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13) >> (uint32_t)20U);
+      << 12U
+      | (va12 + ((vb13 & vc13) | (~vb13 & vd13)) + xk12 + ti13) >> 20U);
   abcd[3U] = v12;
   uint32_t va13 = abcd[2U];
   uint32_t vb14 = abcd[3U];
   uint32_t vc14 = abcd[0U];
   uint32_t vd14 = abcd[1U];
-  uint8_t *b14 = x + (uint32_t)56U;
+  uint8_t *b14 = x + 56U;
   uint32_t u13 = load32_le(b14);
   uint32_t xk13 = u13;
   uint32_t ti14 = _t[14U];
@@ -298,14 +290,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb14
     +
       ((va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14)
-      << (uint32_t)17U
-      | (va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14) >> (uint32_t)15U);
+      << 17U
+      | (va13 + ((vb14 & vc14) | (~vb14 & vd14)) + xk13 + ti14) >> 15U);
   abcd[2U] = v13;
   uint32_t va14 = abcd[1U];
   uint32_t vb15 = abcd[2U];
   uint32_t vc15 = abcd[3U];
   uint32_t vd15 = abcd[0U];
-  uint8_t *b15 = x + (uint32_t)60U;
+  uint8_t *b15 = x + 60U;
   uint32_t u14 = load32_le(b15);
   uint32_t xk14 = u14;
   uint32_t ti15 = _t[15U];
@@ -314,14 +306,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb15
     +
       ((va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15)
-      << (uint32_t)22U
-      | (va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15) >> (uint32_t)10U);
+      << 22U
+      | (va14 + ((vb15 & vc15) | (~vb15 & vd15)) + xk14 + ti15) >> 10U);
   abcd[1U] = v14;
   uint32_t va15 = abcd[0U];
   uint32_t vb16 = abcd[1U];
   uint32_t vc16 = abcd[2U];
   uint32_t vd16 = abcd[3U];
-  uint8_t *b16 = x + (uint32_t)4U;
+  uint8_t *b16 = x + 4U;
   uint32_t u15 = load32_le(b16);
   uint32_t xk15 = u15;
   uint32_t ti16 = _t[16U];
@@ -330,14 +322,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb16
     +
       ((va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16)
-      << (uint32_t)5U
-      | (va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16) >> (uint32_t)27U);
+      << 5U
+      | (va15 + ((vb16 & vd16) | (vc16 & ~vd16)) + xk15 + ti16) >> 27U);
   abcd[0U] = v15;
   uint32_t va16 = abcd[3U];
   uint32_t vb17 = abcd[0U];
   uint32_t vc17 = abcd[1U];
   uint32_t vd17 = abcd[2U];
-  uint8_t *b17 = x + (uint32_t)24U;
+  uint8_t *b17 = x + 24U;
   uint32_t u16 = load32_le(b17);
   uint32_t xk16 = u16;
   uint32_t ti17 = _t[17U];
@@ -346,14 +338,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb17
     +
       ((va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17)
-      << (uint32_t)9U
-      | (va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17) >> (uint32_t)23U);
+      << 9U
+      | (va16 + ((vb17 & vd17) | (vc17 & ~vd17)) + xk16 + ti17) >> 23U);
   abcd[3U] = v16;
   uint32_t va17 = abcd[2U];
   uint32_t vb18 = abcd[3U];
   uint32_t vc18 = abcd[0U];
   uint32_t vd18 = abcd[1U];
-  uint8_t *b18 = x + (uint32_t)44U;
+  uint8_t *b18 = x + 44U;
   uint32_t u17 = load32_le(b18);
   uint32_t xk17 = u17;
   uint32_t ti18 = _t[18U];
@@ -362,8 +354,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb18
     +
       ((va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18)
-      << (uint32_t)14U
-      | (va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18) >> (uint32_t)18U);
+      << 14U
+      | (va17 + ((vb18 & vd18) | (vc18 & ~vd18)) + xk17 + ti18) >> 18U);
   abcd[2U] = v17;
   uint32_t va18 = abcd[1U];
   uint32_t vb19 = abcd[2U];
@@ -378,14 +370,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb19
     +
       ((va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19)
-      << (uint32_t)20U
-      | (va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19) >> (uint32_t)12U);
+      << 20U
+      | (va18 + ((vb19 & vd19) | (vc19 & ~vd19)) + xk18 + ti19) >> 12U);
   abcd[1U] = v18;
   uint32_t va19 = abcd[0U];
   uint32_t vb20 = abcd[1U];
   uint32_t vc20 = abcd[2U];
   uint32_t vd20 = abcd[3U];
-  uint8_t *b20 = x + (uint32_t)20U;
+  uint8_t *b20 = x + 20U;
   uint32_t u19 = load32_le(b20);
   uint32_t xk19 = u19;
   uint32_t ti20 = _t[20U];
@@ -394,14 +386,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb20
     +
       ((va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20)
-      << (uint32_t)5U
-      | (va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20) >> (uint32_t)27U);
+      << 5U
+      | (va19 + ((vb20 & vd20) | (vc20 & ~vd20)) + xk19 + ti20) >> 27U);
   abcd[0U] = v19;
   uint32_t va20 = abcd[3U];
   uint32_t vb21 = abcd[0U];
   uint32_t vc21 = abcd[1U];
   uint32_t vd21 = abcd[2U];
-  uint8_t *b21 = x + (uint32_t)40U;
+  uint8_t *b21 = x + 40U;
   uint32_t u20 = load32_le(b21);
   uint32_t xk20 = u20;
   uint32_t ti21 = _t[21U];
@@ -410,14 +402,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb21
     +
       ((va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21)
-      << (uint32_t)9U
-      | (va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21) >> (uint32_t)23U);
+      << 9U
+      | (va20 + ((vb21 & vd21) | (vc21 & ~vd21)) + xk20 + ti21) >> 23U);
   abcd[3U] = v20;
   uint32_t va21 = abcd[2U];
   uint32_t vb22 = abcd[3U];
   uint32_t vc22 = abcd[0U];
   uint32_t vd22 = abcd[1U];
-  uint8_t *b22 = x + (uint32_t)60U;
+  uint8_t *b22 = x + 60U;
   uint32_t u21 = load32_le(b22);
   uint32_t xk21 = u21;
   uint32_t ti22 = _t[22U];
@@ -426,14 +418,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb22
     +
       ((va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22)
-      << (uint32_t)14U
-      | (va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22) >> (uint32_t)18U);
+      << 14U
+      | (va21 + ((vb22 & vd22) | (vc22 & ~vd22)) + xk21 + ti22) >> 18U);
   abcd[2U] = v21;
   uint32_t va22 = abcd[1U];
   uint32_t vb23 = abcd[2U];
   uint32_t vc23 = abcd[3U];
   uint32_t vd23 = abcd[0U];
-  uint8_t *b23 = x + (uint32_t)16U;
+  uint8_t *b23 = x + 16U;
   uint32_t u22 = load32_le(b23);
   uint32_t xk22 = u22;
   uint32_t ti23 = _t[23U];
@@ -442,14 +434,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb23
     +
       ((va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23)
-      << (uint32_t)20U
-      | (va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23) >> (uint32_t)12U);
+      << 20U
+      | (va22 + ((vb23 & vd23) | (vc23 & ~vd23)) + xk22 + ti23) >> 12U);
   abcd[1U] = v22;
   uint32_t va23 = abcd[0U];
   uint32_t vb24 = abcd[1U];
   uint32_t vc24 = abcd[2U];
   uint32_t vd24 = abcd[3U];
-  uint8_t *b24 = x + (uint32_t)36U;
+  uint8_t *b24 = x + 36U;
   uint32_t u23 = load32_le(b24);
   uint32_t xk23 = u23;
   uint32_t ti24 = _t[24U];
@@ -458,14 +450,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb24
     +
       ((va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24)
-      << (uint32_t)5U
-      | (va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24) >> (uint32_t)27U);
+      << 5U
+      | (va23 + ((vb24 & vd24) | (vc24 & ~vd24)) + xk23 + ti24) >> 27U);
   abcd[0U] = v23;
   uint32_t va24 = abcd[3U];
   uint32_t vb25 = abcd[0U];
   uint32_t vc25 = abcd[1U];
   uint32_t vd25 = abcd[2U];
-  uint8_t *b25 = x + (uint32_t)56U;
+  uint8_t *b25 = x + 56U;
   uint32_t u24 = load32_le(b25);
   uint32_t xk24 = u24;
   uint32_t ti25 = _t[25U];
@@ -474,14 +466,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb25
     +
       ((va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25)
-      << (uint32_t)9U
-      | (va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25) >> (uint32_t)23U);
+      << 9U
+      | (va24 + ((vb25 & vd25) | (vc25 & ~vd25)) + xk24 + ti25) >> 23U);
   abcd[3U] = v24;
   uint32_t va25 = abcd[2U];
   uint32_t vb26 = abcd[3U];
   uint32_t vc26 = abcd[0U];
   uint32_t vd26 = abcd[1U];
-  uint8_t *b26 = x + (uint32_t)12U;
+  uint8_t *b26 = x + 12U;
   uint32_t u25 = load32_le(b26);
   uint32_t xk25 = u25;
   uint32_t ti26 = _t[26U];
@@ -490,14 +482,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb26
     +
       ((va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26)
-      << (uint32_t)14U
-      | (va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26) >> (uint32_t)18U);
+      << 14U
+      | (va25 + ((vb26 & vd26) | (vc26 & ~vd26)) + xk25 + ti26) >> 18U);
   abcd[2U] = v25;
   uint32_t va26 = abcd[1U];
   uint32_t vb27 = abcd[2U];
   uint32_t vc27 = abcd[3U];
   uint32_t vd27 = abcd[0U];
-  uint8_t *b27 = x + (uint32_t)32U;
+  uint8_t *b27 = x + 32U;
   uint32_t u26 = load32_le(b27);
   uint32_t xk26 = u26;
   uint32_t ti27 = _t[27U];
@@ -506,14 +498,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb27
     +
       ((va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27)
-      << (uint32_t)20U
-      | (va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27) >> (uint32_t)12U);
+      << 20U
+      | (va26 + ((vb27 & vd27) | (vc27 & ~vd27)) + xk26 + ti27) >> 12U);
   abcd[1U] = v26;
   uint32_t va27 = abcd[0U];
   uint32_t vb28 = abcd[1U];
   uint32_t vc28 = abcd[2U];
   uint32_t vd28 = abcd[3U];
-  uint8_t *b28 = x + (uint32_t)52U;
+  uint8_t *b28 = x + 52U;
   uint32_t u27 = load32_le(b28);
   uint32_t xk27 = u27;
   uint32_t ti28 = _t[28U];
@@ -522,14 +514,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb28
     +
       ((va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28)
-      << (uint32_t)5U
-      | (va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28) >> (uint32_t)27U);
+      << 5U
+      | (va27 + ((vb28 & vd28) | (vc28 & ~vd28)) + xk27 + ti28) >> 27U);
   abcd[0U] = v27;
   uint32_t va28 = abcd[3U];
   uint32_t vb29 = abcd[0U];
   uint32_t vc29 = abcd[1U];
   uint32_t vd29 = abcd[2U];
-  uint8_t *b29 = x + (uint32_t)8U;
+  uint8_t *b29 = x + 8U;
   uint32_t u28 = load32_le(b29);
   uint32_t xk28 = u28;
   uint32_t ti29 = _t[29U];
@@ -538,14 +530,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb29
     +
       ((va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29)
-      << (uint32_t)9U
-      | (va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29) >> (uint32_t)23U);
+      << 9U
+      | (va28 + ((vb29 & vd29) | (vc29 & ~vd29)) + xk28 + ti29) >> 23U);
   abcd[3U] = v28;
   uint32_t va29 = abcd[2U];
   uint32_t vb30 = abcd[3U];
   uint32_t vc30 = abcd[0U];
   uint32_t vd30 = abcd[1U];
-  uint8_t *b30 = x + (uint32_t)28U;
+  uint8_t *b30 = x + 28U;
   uint32_t u29 = load32_le(b30);
   uint32_t xk29 = u29;
   uint32_t ti30 = _t[30U];
@@ -554,14 +546,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb30
     +
       ((va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30)
-      << (uint32_t)14U
-      | (va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30) >> (uint32_t)18U);
+      << 14U
+      | (va29 + ((vb30 & vd30) | (vc30 & ~vd30)) + xk29 + ti30) >> 18U);
   abcd[2U] = v29;
   uint32_t va30 = abcd[1U];
   uint32_t vb31 = abcd[2U];
   uint32_t vc31 = abcd[3U];
   uint32_t vd31 = abcd[0U];
-  uint8_t *b31 = x + (uint32_t)48U;
+  uint8_t *b31 = x + 48U;
   uint32_t u30 = load32_le(b31);
   uint32_t xk30 = u30;
   uint32_t ti31 = _t[31U];
@@ -570,14 +562,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb31
     +
       ((va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31)
-      << (uint32_t)20U
-      | (va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31) >> (uint32_t)12U);
+      << 20U
+      | (va30 + ((vb31 & vd31) | (vc31 & ~vd31)) + xk30 + ti31) >> 12U);
   abcd[1U] = v30;
   uint32_t va31 = abcd[0U];
   uint32_t vb32 = abcd[1U];
   uint32_t vc32 = abcd[2U];
   uint32_t vd32 = abcd[3U];
-  uint8_t *b32 = x + (uint32_t)20U;
+  uint8_t *b32 = x + 20U;
   uint32_t u31 = load32_le(b32);
   uint32_t xk31 = u31;
   uint32_t ti32 = _t[32U];
@@ -586,14 +578,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb32
     +
       ((va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32)
-      << (uint32_t)4U
-      | (va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32) >> (uint32_t)28U);
+      << 4U
+      | (va31 + (vb32 ^ (vc32 ^ vd32)) + xk31 + ti32) >> 28U);
   abcd[0U] = v31;
   uint32_t va32 = abcd[3U];
   uint32_t vb33 = abcd[0U];
   uint32_t vc33 = abcd[1U];
   uint32_t vd33 = abcd[2U];
-  uint8_t *b33 = x + (uint32_t)32U;
+  uint8_t *b33 = x + 32U;
   uint32_t u32 = load32_le(b33);
   uint32_t xk32 = u32;
   uint32_t ti33 = _t[33U];
@@ -602,14 +594,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb33
     +
       ((va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33)
-      << (uint32_t)11U
-      | (va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33) >> (uint32_t)21U);
+      << 11U
+      | (va32 + (vb33 ^ (vc33 ^ vd33)) + xk32 + ti33) >> 21U);
   abcd[3U] = v32;
   uint32_t va33 = abcd[2U];
   uint32_t vb34 = abcd[3U];
   uint32_t vc34 = abcd[0U];
   uint32_t vd34 = abcd[1U];
-  uint8_t *b34 = x + (uint32_t)44U;
+  uint8_t *b34 = x + 44U;
   uint32_t u33 = load32_le(b34);
   uint32_t xk33 = u33;
   uint32_t ti34 = _t[34U];
@@ -618,14 +610,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb34
     +
       ((va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34)
-      << (uint32_t)16U
-      | (va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34) >> (uint32_t)16U);
+      << 16U
+      | (va33 + (vb34 ^ (vc34 ^ vd34)) + xk33 + ti34) >> 16U);
   abcd[2U] = v33;
   uint32_t va34 = abcd[1U];
   uint32_t vb35 = abcd[2U];
   uint32_t vc35 = abcd[3U];
   uint32_t vd35 = abcd[0U];
-  uint8_t *b35 = x + (uint32_t)56U;
+  uint8_t *b35 = x + 56U;
   uint32_t u34 = load32_le(b35);
   uint32_t xk34 = u34;
   uint32_t ti35 = _t[35U];
@@ -634,14 +626,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb35
     +
       ((va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35)
-      << (uint32_t)23U
-      | (va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35) >> (uint32_t)9U);
+      << 23U
+      | (va34 + (vb35 ^ (vc35 ^ vd35)) + xk34 + ti35) >> 9U);
   abcd[1U] = v34;
   uint32_t va35 = abcd[0U];
   uint32_t vb36 = abcd[1U];
   uint32_t vc36 = abcd[2U];
   uint32_t vd36 = abcd[3U];
-  uint8_t *b36 = x + (uint32_t)4U;
+  uint8_t *b36 = x + 4U;
   uint32_t u35 = load32_le(b36);
   uint32_t xk35 = u35;
   uint32_t ti36 = _t[36U];
@@ -650,14 +642,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb36
     +
       ((va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36)
-      << (uint32_t)4U
-      | (va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36) >> (uint32_t)28U);
+      << 4U
+      | (va35 + (vb36 ^ (vc36 ^ vd36)) + xk35 + ti36) >> 28U);
   abcd[0U] = v35;
   uint32_t va36 = abcd[3U];
   uint32_t vb37 = abcd[0U];
   uint32_t vc37 = abcd[1U];
   uint32_t vd37 = abcd[2U];
-  uint8_t *b37 = x + (uint32_t)16U;
+  uint8_t *b37 = x + 16U;
   uint32_t u36 = load32_le(b37);
   uint32_t xk36 = u36;
   uint32_t ti37 = _t[37U];
@@ -666,14 +658,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb37
     +
       ((va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37)
-      << (uint32_t)11U
-      | (va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37) >> (uint32_t)21U);
+      << 11U
+      | (va36 + (vb37 ^ (vc37 ^ vd37)) + xk36 + ti37) >> 21U);
   abcd[3U] = v36;
   uint32_t va37 = abcd[2U];
   uint32_t vb38 = abcd[3U];
   uint32_t vc38 = abcd[0U];
   uint32_t vd38 = abcd[1U];
-  uint8_t *b38 = x + (uint32_t)28U;
+  uint8_t *b38 = x + 28U;
   uint32_t u37 = load32_le(b38);
   uint32_t xk37 = u37;
   uint32_t ti38 = _t[38U];
@@ -682,14 +674,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb38
     +
       ((va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38)
-      << (uint32_t)16U
-      | (va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38) >> (uint32_t)16U);
+      << 16U
+      | (va37 + (vb38 ^ (vc38 ^ vd38)) + xk37 + ti38) >> 16U);
   abcd[2U] = v37;
   uint32_t va38 = abcd[1U];
   uint32_t vb39 = abcd[2U];
   uint32_t vc39 = abcd[3U];
   uint32_t vd39 = abcd[0U];
-  uint8_t *b39 = x + (uint32_t)40U;
+  uint8_t *b39 = x + 40U;
   uint32_t u38 = load32_le(b39);
   uint32_t xk38 = u38;
   uint32_t ti39 = _t[39U];
@@ -698,14 +690,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb39
     +
       ((va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39)
-      << (uint32_t)23U
-      | (va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39) >> (uint32_t)9U);
+      << 23U
+      | (va38 + (vb39 ^ (vc39 ^ vd39)) + xk38 + ti39) >> 9U);
   abcd[1U] = v38;
   uint32_t va39 = abcd[0U];
   uint32_t vb40 = abcd[1U];
   uint32_t vc40 = abcd[2U];
   uint32_t vd40 = abcd[3U];
-  uint8_t *b40 = x + (uint32_t)52U;
+  uint8_t *b40 = x + 52U;
   uint32_t u39 = load32_le(b40);
   uint32_t xk39 = u39;
   uint32_t ti40 = _t[40U];
@@ -714,8 +706,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb40
     +
       ((va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40)
-      << (uint32_t)4U
-      | (va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40) >> (uint32_t)28U);
+      << 4U
+      | (va39 + (vb40 ^ (vc40 ^ vd40)) + xk39 + ti40) >> 28U);
   abcd[0U] = v39;
   uint32_t va40 = abcd[3U];
   uint32_t vb41 = abcd[0U];
@@ -730,14 +722,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb41
     +
       ((va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41)
-      << (uint32_t)11U
-      | (va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41) >> (uint32_t)21U);
+      << 11U
+      | (va40 + (vb41 ^ (vc41 ^ vd41)) + xk40 + ti41) >> 21U);
   abcd[3U] = v40;
   uint32_t va41 = abcd[2U];
   uint32_t vb42 = abcd[3U];
   uint32_t vc42 = abcd[0U];
   uint32_t vd42 = abcd[1U];
-  uint8_t *b42 = x + (uint32_t)12U;
+  uint8_t *b42 = x + 12U;
   uint32_t u41 = load32_le(b42);
   uint32_t xk41 = u41;
   uint32_t ti42 = _t[42U];
@@ -746,14 +738,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb42
     +
       ((va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42)
-      << (uint32_t)16U
-      | (va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42) >> (uint32_t)16U);
+      << 16U
+      | (va41 + (vb42 ^ (vc42 ^ vd42)) + xk41 + ti42) >> 16U);
   abcd[2U] = v41;
   uint32_t va42 = abcd[1U];
   uint32_t vb43 = abcd[2U];
   uint32_t vc43 = abcd[3U];
   uint32_t vd43 = abcd[0U];
-  uint8_t *b43 = x + (uint32_t)24U;
+  uint8_t *b43 = x + 24U;
   uint32_t u42 = load32_le(b43);
   uint32_t xk42 = u42;
   uint32_t ti43 = _t[43U];
@@ -762,14 +754,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb43
     +
       ((va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43)
-      << (uint32_t)23U
-      | (va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43) >> (uint32_t)9U);
+      << 23U
+      | (va42 + (vb43 ^ (vc43 ^ vd43)) + xk42 + ti43) >> 9U);
   abcd[1U] = v42;
   uint32_t va43 = abcd[0U];
   uint32_t vb44 = abcd[1U];
   uint32_t vc44 = abcd[2U];
   uint32_t vd44 = abcd[3U];
-  uint8_t *b44 = x + (uint32_t)36U;
+  uint8_t *b44 = x + 36U;
   uint32_t u43 = load32_le(b44);
   uint32_t xk43 = u43;
   uint32_t ti44 = _t[44U];
@@ -778,14 +770,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb44
     +
       ((va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44)
-      << (uint32_t)4U
-      | (va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44) >> (uint32_t)28U);
+      << 4U
+      | (va43 + (vb44 ^ (vc44 ^ vd44)) + xk43 + ti44) >> 28U);
   abcd[0U] = v43;
   uint32_t va44 = abcd[3U];
   uint32_t vb45 = abcd[0U];
   uint32_t vc45 = abcd[1U];
   uint32_t vd45 = abcd[2U];
-  uint8_t *b45 = x + (uint32_t)48U;
+  uint8_t *b45 = x + 48U;
   uint32_t u44 = load32_le(b45);
   uint32_t xk44 = u44;
   uint32_t ti45 = _t[45U];
@@ -794,14 +786,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb45
     +
       ((va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45)
-      << (uint32_t)11U
-      | (va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45) >> (uint32_t)21U);
+      << 11U
+      | (va44 + (vb45 ^ (vc45 ^ vd45)) + xk44 + ti45) >> 21U);
   abcd[3U] = v44;
   uint32_t va45 = abcd[2U];
   uint32_t vb46 = abcd[3U];
   uint32_t vc46 = abcd[0U];
   uint32_t vd46 = abcd[1U];
-  uint8_t *b46 = x + (uint32_t)60U;
+  uint8_t *b46 = x + 60U;
   uint32_t u45 = load32_le(b46);
   uint32_t xk45 = u45;
   uint32_t ti46 = _t[46U];
@@ -810,14 +802,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb46
     +
       ((va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46)
-      << (uint32_t)16U
-      | (va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46) >> (uint32_t)16U);
+      << 16U
+      | (va45 + (vb46 ^ (vc46 ^ vd46)) + xk45 + ti46) >> 16U);
   abcd[2U] = v45;
   uint32_t va46 = abcd[1U];
   uint32_t vb47 = abcd[2U];
   uint32_t vc47 = abcd[3U];
   uint32_t vd47 = abcd[0U];
-  uint8_t *b47 = x + (uint32_t)8U;
+  uint8_t *b47 = x + 8U;
   uint32_t u46 = load32_le(b47);
   uint32_t xk46 = u46;
   uint32_t ti47 = _t[47U];
@@ -826,8 +818,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb47
     +
       ((va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47)
-      << (uint32_t)23U
-      | (va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47) >> (uint32_t)9U);
+      << 23U
+      | (va46 + (vb47 ^ (vc47 ^ vd47)) + xk46 + ti47) >> 9U);
   abcd[1U] = v46;
   uint32_t va47 = abcd[0U];
   uint32_t vb48 = abcd[1U];
@@ -842,14 +834,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb48
     +
       ((va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48)
-      << (uint32_t)6U
-      | (va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48) >> (uint32_t)26U);
+      << 6U
+      | (va47 + (vc48 ^ (vb48 | ~vd48)) + xk47 + ti48) >> 26U);
   abcd[0U] = v47;
   uint32_t va48 = abcd[3U];
   uint32_t vb49 = abcd[0U];
   uint32_t vc49 = abcd[1U];
   uint32_t vd49 = abcd[2U];
-  uint8_t *b49 = x + (uint32_t)28U;
+  uint8_t *b49 = x + 28U;
   uint32_t u48 = load32_le(b49);
   uint32_t xk48 = u48;
   uint32_t ti49 = _t[49U];
@@ -858,14 +850,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb49
     +
       ((va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49)
-      << (uint32_t)10U
-      | (va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49) >> (uint32_t)22U);
+      << 10U
+      | (va48 + (vc49 ^ (vb49 | ~vd49)) + xk48 + ti49) >> 22U);
   abcd[3U] = v48;
   uint32_t va49 = abcd[2U];
   uint32_t vb50 = abcd[3U];
   uint32_t vc50 = abcd[0U];
   uint32_t vd50 = abcd[1U];
-  uint8_t *b50 = x + (uint32_t)56U;
+  uint8_t *b50 = x + 56U;
   uint32_t u49 = load32_le(b50);
   uint32_t xk49 = u49;
   uint32_t ti50 = _t[50U];
@@ -874,14 +866,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb50
     +
       ((va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50)
-      << (uint32_t)15U
-      | (va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50) >> (uint32_t)17U);
+      << 15U
+      | (va49 + (vc50 ^ (vb50 | ~vd50)) + xk49 + ti50) >> 17U);
   abcd[2U] = v49;
   uint32_t va50 = abcd[1U];
   uint32_t vb51 = abcd[2U];
   uint32_t vc51 = abcd[3U];
   uint32_t vd51 = abcd[0U];
-  uint8_t *b51 = x + (uint32_t)20U;
+  uint8_t *b51 = x + 20U;
   uint32_t u50 = load32_le(b51);
   uint32_t xk50 = u50;
   uint32_t ti51 = _t[51U];
@@ -890,14 +882,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb51
     +
       ((va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51)
-      << (uint32_t)21U
-      | (va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51) >> (uint32_t)11U);
+      << 21U
+      | (va50 + (vc51 ^ (vb51 | ~vd51)) + xk50 + ti51) >> 11U);
   abcd[1U] = v50;
   uint32_t va51 = abcd[0U];
   uint32_t vb52 = abcd[1U];
   uint32_t vc52 = abcd[2U];
   uint32_t vd52 = abcd[3U];
-  uint8_t *b52 = x + (uint32_t)48U;
+  uint8_t *b52 = x + 48U;
   uint32_t u51 = load32_le(b52);
   uint32_t xk51 = u51;
   uint32_t ti52 = _t[52U];
@@ -906,14 +898,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb52
     +
       ((va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52)
-      << (uint32_t)6U
-      | (va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52) >> (uint32_t)26U);
+      << 6U
+      | (va51 + (vc52 ^ (vb52 | ~vd52)) + xk51 + ti52) >> 26U);
   abcd[0U] = v51;
   uint32_t va52 = abcd[3U];
   uint32_t vb53 = abcd[0U];
   uint32_t vc53 = abcd[1U];
   uint32_t vd53 = abcd[2U];
-  uint8_t *b53 = x + (uint32_t)12U;
+  uint8_t *b53 = x + 12U;
   uint32_t u52 = load32_le(b53);
   uint32_t xk52 = u52;
   uint32_t ti53 = _t[53U];
@@ -922,14 +914,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb53
     +
       ((va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53)
-      << (uint32_t)10U
-      | (va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53) >> (uint32_t)22U);
+      << 10U
+      | (va52 + (vc53 ^ (vb53 | ~vd53)) + xk52 + ti53) >> 22U);
   abcd[3U] = v52;
   uint32_t va53 = abcd[2U];
   uint32_t vb54 = abcd[3U];
   uint32_t vc54 = abcd[0U];
   uint32_t vd54 = abcd[1U];
-  uint8_t *b54 = x + (uint32_t)40U;
+  uint8_t *b54 = x + 40U;
   uint32_t u53 = load32_le(b54);
   uint32_t xk53 = u53;
   uint32_t ti54 = _t[54U];
@@ -938,14 +930,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb54
     +
       ((va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54)
-      << (uint32_t)15U
-      | (va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54) >> (uint32_t)17U);
+      << 15U
+      | (va53 + (vc54 ^ (vb54 | ~vd54)) + xk53 + ti54) >> 17U);
   abcd[2U] = v53;
   uint32_t va54 = abcd[1U];
   uint32_t vb55 = abcd[2U];
   uint32_t vc55 = abcd[3U];
   uint32_t vd55 = abcd[0U];
-  uint8_t *b55 = x + (uint32_t)4U;
+  uint8_t *b55 = x + 4U;
   uint32_t u54 = load32_le(b55);
   uint32_t xk54 = u54;
   uint32_t ti55 = _t[55U];
@@ -954,14 +946,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb55
     +
       ((va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55)
-      << (uint32_t)21U
-      | (va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55) >> (uint32_t)11U);
+      << 21U
+      | (va54 + (vc55 ^ (vb55 | ~vd55)) + xk54 + ti55) >> 11U);
   abcd[1U] = v54;
   uint32_t va55 = abcd[0U];
   uint32_t vb56 = abcd[1U];
   uint32_t vc56 = abcd[2U];
   uint32_t vd56 = abcd[3U];
-  uint8_t *b56 = x + (uint32_t)32U;
+  uint8_t *b56 = x + 32U;
   uint32_t u55 = load32_le(b56);
   uint32_t xk55 = u55;
   uint32_t ti56 = _t[56U];
@@ -970,14 +962,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb56
     +
       ((va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56)
-      << (uint32_t)6U
-      | (va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56) >> (uint32_t)26U);
+      << 6U
+      | (va55 + (vc56 ^ (vb56 | ~vd56)) + xk55 + ti56) >> 26U);
   abcd[0U] = v55;
   uint32_t va56 = abcd[3U];
   uint32_t vb57 = abcd[0U];
   uint32_t vc57 = abcd[1U];
   uint32_t vd57 = abcd[2U];
-  uint8_t *b57 = x + (uint32_t)60U;
+  uint8_t *b57 = x + 60U;
   uint32_t u56 = load32_le(b57);
   uint32_t xk56 = u56;
   uint32_t ti57 = _t[57U];
@@ -986,14 +978,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb57
     +
       ((va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57)
-      << (uint32_t)10U
-      | (va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57) >> (uint32_t)22U);
+      << 10U
+      | (va56 + (vc57 ^ (vb57 | ~vd57)) + xk56 + ti57) >> 22U);
   abcd[3U] = v56;
   uint32_t va57 = abcd[2U];
   uint32_t vb58 = abcd[3U];
   uint32_t vc58 = abcd[0U];
   uint32_t vd58 = abcd[1U];
-  uint8_t *b58 = x + (uint32_t)24U;
+  uint8_t *b58 = x + 24U;
   uint32_t u57 = load32_le(b58);
   uint32_t xk57 = u57;
   uint32_t ti58 = _t[58U];
@@ -1002,14 +994,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb58
     +
       ((va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58)
-      << (uint32_t)15U
-      | (va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58) >> (uint32_t)17U);
+      << 15U
+      | (va57 + (vc58 ^ (vb58 | ~vd58)) + xk57 + ti58) >> 17U);
   abcd[2U] = v57;
   uint32_t va58 = abcd[1U];
   uint32_t vb59 = abcd[2U];
   uint32_t vc59 = abcd[3U];
   uint32_t vd59 = abcd[0U];
-  uint8_t *b59 = x + (uint32_t)52U;
+  uint8_t *b59 = x + 52U;
   uint32_t u58 = load32_le(b59);
   uint32_t xk58 = u58;
   uint32_t ti59 = _t[59U];
@@ -1018,14 +1010,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb59
     +
       ((va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59)
-      << (uint32_t)21U
-      | (va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59) >> (uint32_t)11U);
+      << 21U
+      | (va58 + (vc59 ^ (vb59 | ~vd59)) + xk58 + ti59) >> 11U);
   abcd[1U] = v58;
   uint32_t va59 = abcd[0U];
   uint32_t vb60 = abcd[1U];
   uint32_t vc60 = abcd[2U];
   uint32_t vd60 = abcd[3U];
-  uint8_t *b60 = x + (uint32_t)16U;
+  uint8_t *b60 = x + 16U;
   uint32_t u59 = load32_le(b60);
   uint32_t xk59 = u59;
   uint32_t ti60 = _t[60U];
@@ -1034,14 +1026,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb60
     +
       ((va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60)
-      << (uint32_t)6U
-      | (va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60) >> (uint32_t)26U);
+      << 6U
+      | (va59 + (vc60 ^ (vb60 | ~vd60)) + xk59 + ti60) >> 26U);
   abcd[0U] = v59;
   uint32_t va60 = abcd[3U];
   uint32_t vb61 = abcd[0U];
   uint32_t vc61 = abcd[1U];
   uint32_t vd61 = abcd[2U];
-  uint8_t *b61 = x + (uint32_t)44U;
+  uint8_t *b61 = x + 44U;
   uint32_t u60 = load32_le(b61);
   uint32_t xk60 = u60;
   uint32_t ti61 = _t[61U];
@@ -1050,14 +1042,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb61
     +
       ((va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61)
-      << (uint32_t)10U
-      | (va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61) >> (uint32_t)22U);
+      << 10U
+      | (va60 + (vc61 ^ (vb61 | ~vd61)) + xk60 + ti61) >> 22U);
   abcd[3U] = v60;
   uint32_t va61 = abcd[2U];
   uint32_t vb62 = abcd[3U];
   uint32_t vc62 = abcd[0U];
   uint32_t vd62 = abcd[1U];
-  uint8_t *b62 = x + (uint32_t)8U;
+  uint8_t *b62 = x + 8U;
   uint32_t u61 = load32_le(b62);
   uint32_t xk61 = u61;
   uint32_t ti62 = _t[62U];
@@ -1066,14 +1058,14 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb62
     +
       ((va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62)
-      << (uint32_t)15U
-      | (va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62) >> (uint32_t)17U);
+      << 15U
+      | (va61 + (vc62 ^ (vb62 | ~vd62)) + xk61 + ti62) >> 17U);
   abcd[2U] = v61;
   uint32_t va62 = abcd[1U];
   uint32_t vb = abcd[2U];
   uint32_t vc = abcd[3U];
   uint32_t vd = abcd[0U];
-  uint8_t *b63 = x + (uint32_t)36U;
+  uint8_t *b63 = x + 36U;
   uint32_t u62 = load32_le(b63);
   uint32_t xk62 = u62;
   uint32_t ti = _t[63U];
@@ -1082,8 +1074,8 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
     vb
     +
       ((va62 + (vc ^ (vb | ~vd)) + xk62 + ti)
-      << (uint32_t)21U
-      | (va62 + (vc ^ (vb | ~vd)) + xk62 + ti) >> (uint32_t)11U);
+      << 21U
+      | (va62 + (vc ^ (vb | ~vd)) + xk62 + ti) >> 11U);
   abcd[1U] = v62;
   uint32_t a = abcd[0U];
   uint32_t b = abcd[1U];
@@ -1095,98 +1087,69 @@ static void legacy_update(uint32_t *abcd, uint8_t *x)
   abcd[3U] = d + dd;
 }
 
-static void legacy_pad(uint64_t len, uint8_t *dst)
+static void pad(uint64_t len, uint8_t *dst)
 {
   uint8_t *dst1 = dst;
-  dst1[0U] = (uint8_t)0x80U;
-  uint8_t *dst2 = dst + (uint32_t)1U;
-  for
-  (uint32_t
-    i = (uint32_t)0U;
-    i
-    < ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
-    i++)
+  dst1[0U] = 0x80U;
+  uint8_t *dst2 = dst + 1U;
+  for (uint32_t i = 0U; i < (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U; i++)
   {
-    dst2[i] = (uint8_t)0U;
+    dst2[i] = 0U;
   }
-  uint8_t
-  *dst3 =
-    dst
-    +
-      (uint32_t)1U
-      +
-        ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U)))
-        % (uint32_t)64U;
-  store64_le(dst3, len << (uint32_t)3U);
+  uint8_t *dst3 = dst + 1U + (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U;
+  store64_le(dst3, len << 3U);
 }
 
-void Hacl_Hash_Core_MD5_legacy_finish(uint32_t *s, uint8_t *dst)
+void Hacl_Hash_MD5_finish(uint32_t *s, uint8_t *dst)
 {
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store32_le(dst + i * (uint32_t)4U, s[i]););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store32_le(dst + i * 4U, s[i]););
 }
 
-void Hacl_Hash_MD5_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
+void Hacl_Hash_MD5_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
 {
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
-    uint32_t sz = (uint32_t)64U;
+    uint32_t sz = 64U;
     uint8_t *block = blocks + sz * i;
-    legacy_update(s, block);
+    update(s, block);
   }
 }
 
 void
-Hacl_Hash_MD5_legacy_update_last(
-  uint32_t *s,
-  uint64_t prev_len,
-  uint8_t *input,
-  uint32_t input_len
-)
+Hacl_Hash_MD5_update_last(uint32_t *s, uint64_t prev_len, uint8_t *input, uint32_t input_len)
 {
-  uint32_t blocks_n = input_len / (uint32_t)64U;
-  uint32_t blocks_len = blocks_n * (uint32_t)64U;
+  uint32_t blocks_n = input_len / 64U;
+  uint32_t blocks_len = blocks_n * 64U;
   uint8_t *blocks = input;
   uint32_t rest_len = input_len - blocks_len;
   uint8_t *rest = input + blocks_len;
-  Hacl_Hash_MD5_legacy_update_multi(s, blocks, blocks_n);
+  Hacl_Hash_MD5_update_multi(s, blocks, blocks_n);
   uint64_t total_input_len = prev_len + (uint64_t)input_len;
-  uint32_t
-  pad_len =
-    (uint32_t)1U
-    +
-      ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U)))
-      % (uint32_t)64U
-    + (uint32_t)8U;
+  uint32_t pad_len = 1U + (128U - (9U + (uint32_t)(total_input_len % (uint64_t)64U))) % 64U + 8U;
   uint32_t tmp_len = rest_len + pad_len;
   uint8_t tmp_twoblocks[128U] = { 0U };
   uint8_t *tmp = tmp_twoblocks;
   uint8_t *tmp_rest = tmp;
   uint8_t *tmp_pad = tmp + rest_len;
   memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t));
-  legacy_pad(total_input_len, tmp_pad);
-  Hacl_Hash_MD5_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U);
+  pad(total_input_len, tmp_pad);
+  Hacl_Hash_MD5_update_multi(s, tmp, tmp_len / 64U);
 }
 
-void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_MD5_hash_oneshot(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
-  uint32_t
-  s[4U] =
-    { (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U };
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t s[4U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U };
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -1195,75 +1158,75 @@ void Hacl_Hash_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
   uint8_t *blocks = blocks0;
   uint32_t rest_len = rest_len0;
   uint8_t *rest = rest0;
-  Hacl_Hash_MD5_legacy_update_multi(s, blocks, blocks_n);
-  Hacl_Hash_MD5_legacy_update_last(s, (uint64_t)blocks_len, rest, rest_len);
-  Hacl_Hash_Core_MD5_legacy_finish(s, dst);
+  Hacl_Hash_MD5_update_multi(s, blocks, blocks_n);
+  Hacl_Hash_MD5_update_last(s, (uint64_t)blocks_len, rest, rest_len);
+  Hacl_Hash_MD5_finish(s, output);
 }
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_create_in(void)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_malloc(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
-  Hacl_Hash_Core_MD5_legacy_init(block_state);
+  Hacl_Hash_MD5_init(block_state);
   return p;
 }
 
-void Hacl_Streaming_MD5_legacy_init(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_MD5_reset(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
-  Hacl_Hash_Core_MD5_legacy_init(block_state);
+  Hacl_Hash_MD5_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
+Hacl_Hash_MD5_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len)
 {
-  Hacl_Streaming_MD_state_32 s = *p;
+  Hacl_Streaming_MD_state_32 s = *state;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+  if ((uint64_t)chunk_len > 2305843009213693951ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (chunk_len <= 64U - sz)
   {
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
@@ -1273,74 +1236,74 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_MD5_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_Hash_MD5_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U);
+    uint32_t n_blocks = (chunk_len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    Hacl_Hash_MD5_update_multi(block_state1, data1, data1_len / 64U);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
+          .total_len = total_len1 + (uint64_t)chunk_len
         }
       );
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    uint32_t diff = 64U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state10 = s1.block_state;
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
     uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
@@ -1349,114 +1312,109 @@ Hacl_Streaming_MD5_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, u
           .total_len = total_len2
         }
       );
-    Hacl_Streaming_MD_state_32 s10 = *p;
+    Hacl_Streaming_MD_state_32 s10 = *state;
     uint32_t *block_state1 = s10.block_state;
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_MD5_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_MD5_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
     if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_Hash_MD5_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U);
+    uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    Hacl_Hash_MD5_update_multi(block_state1, data1, data1_len / 64U);
     uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
         }
       );
   }
   return Hacl_Streaming_Types_Success;
 }
 
-void Hacl_Streaming_MD5_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+void Hacl_Hash_MD5_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output)
 {
-  Hacl_Streaming_MD_state_32 scrut = *p;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[4U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)4U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 4U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_Hash_MD5_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U);
+  Hacl_Hash_MD5_update_multi(tmp_block_state, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
-  Hacl_Hash_MD5_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r);
-  Hacl_Hash_Core_MD5_legacy_finish(tmp_block_state, dst);
+  Hacl_Hash_MD5_update_last(tmp_block_state, prev_len_last, buf_last, r);
+  Hacl_Hash_MD5_finish(tmp_block_state, output);
 }
 
-void Hacl_Streaming_MD5_legacy_free(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_MD5_free(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
   KRML_HOST_FREE(block_state);
   KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
+  KRML_HOST_FREE(state);
 }
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_copy(Hacl_Streaming_MD_state_32 *s0)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_MD5_copy(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s0;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)4U, sizeof (uint32_t));
-  memcpy(block_state, block_state0, (uint32_t)4U * sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  memcpy(buf, buf0, 64U * sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(4U, sizeof (uint32_t));
+  memcpy(block_state, block_state0, 4U * sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_32
@@ -1465,8 +1423,8 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_MD5_legacy_copy(Hacl_Streaming_MD_sta
   return p;
 }
 
-void Hacl_Streaming_MD5_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_MD5_hash(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
-  Hacl_Hash_MD5_legacy_hash(input, input_len, dst);
+  Hacl_Hash_MD5_hash_oneshot(output, input, input_len);
 }
 
diff --git a/src/msvc/Hacl_Hash_SHA1.c b/src/msvc/Hacl_Hash_SHA1.c
index 5ecb3c0b..1a8b09b1 100644
--- a/src/msvc/Hacl_Hash_SHA1.c
+++ b/src/msvc/Hacl_Hash_SHA1.c
@@ -25,19 +25,14 @@
 
 #include "internal/Hacl_Hash_SHA1.h"
 
-static uint32_t
-_h0[5U] =
-  {
-    (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-    (uint32_t)0xc3d2e1f0U
-  };
+static uint32_t _h0[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
 
-void Hacl_Hash_Core_SHA1_legacy_init(uint32_t *s)
+void Hacl_Hash_SHA1_init(uint32_t *s)
 {
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, s[i] = _h0[i];);
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i] = _h0[i];);
 }
 
-static void legacy_update(uint32_t *h, uint8_t *l)
+static void update(uint32_t *h, uint8_t *l)
 {
   uint32_t ha = h[0U];
   uint32_t hb = h[1U];
@@ -45,29 +40,26 @@ static void legacy_update(uint32_t *h, uint8_t *l)
   uint32_t hd = h[3U];
   uint32_t he = h[4U];
   uint32_t _w[80U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+  for (uint32_t i = 0U; i < 80U; i++)
   {
     uint32_t v;
-    if (i < (uint32_t)16U)
+    if (i < 16U)
     {
-      uint8_t *b = l + i * (uint32_t)4U;
+      uint8_t *b = l + i * 4U;
       uint32_t u = load32_be(b);
       v = u;
     }
     else
     {
-      uint32_t wmit3 = _w[i - (uint32_t)3U];
-      uint32_t wmit8 = _w[i - (uint32_t)8U];
-      uint32_t wmit14 = _w[i - (uint32_t)14U];
-      uint32_t wmit16 = _w[i - (uint32_t)16U];
-      v =
-        (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16)))
-        << (uint32_t)1U
-        | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) >> (uint32_t)31U;
+      uint32_t wmit3 = _w[i - 3U];
+      uint32_t wmit8 = _w[i - 8U];
+      uint32_t wmit14 = _w[i - 14U];
+      uint32_t wmit16 = _w[i - 16U];
+      v = (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) << 1U | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))) >> 31U;
     }
     _w[i] = v;
   }
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+  for (uint32_t i = 0U; i < 80U; i++)
   {
     uint32_t _a = h[0U];
     uint32_t _b = h[1U];
@@ -76,11 +68,11 @@ static void legacy_update(uint32_t *h, uint8_t *l)
     uint32_t _e = h[4U];
     uint32_t wmit = _w[i];
     uint32_t ite0;
-    if (i < (uint32_t)20U)
+    if (i < 20U)
     {
       ite0 = (_b & _c) ^ (~_b & _d);
     }
-    else if ((uint32_t)39U < i && i < (uint32_t)60U)
+    else if (39U < i && i < 60U)
     {
       ite0 = (_b & _c) ^ ((_b & _d) ^ (_c & _d));
     }
@@ -89,32 +81,32 @@ static void legacy_update(uint32_t *h, uint8_t *l)
       ite0 = _b ^ (_c ^ _d);
     }
     uint32_t ite;
-    if (i < (uint32_t)20U)
+    if (i < 20U)
     {
-      ite = (uint32_t)0x5a827999U;
+      ite = 0x5a827999U;
     }
-    else if (i < (uint32_t)40U)
+    else if (i < 40U)
     {
-      ite = (uint32_t)0x6ed9eba1U;
+      ite = 0x6ed9eba1U;
     }
-    else if (i < (uint32_t)60U)
+    else if (i < 60U)
     {
-      ite = (uint32_t)0x8f1bbcdcU;
+      ite = 0x8f1bbcdcU;
     }
     else
     {
-      ite = (uint32_t)0xca62c1d6U;
+      ite = 0xca62c1d6U;
     }
-    uint32_t _T = (_a << (uint32_t)5U | _a >> (uint32_t)27U) + ite0 + _e + ite + wmit;
+    uint32_t _T = (_a << 5U | _a >> 27U) + ite0 + _e + ite + wmit;
     h[0U] = _T;
     h[1U] = _a;
-    h[2U] = _b << (uint32_t)30U | _b >> (uint32_t)2U;
+    h[2U] = _b << 30U | _b >> 2U;
     h[3U] = _c;
     h[4U] = _d;
   }
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)80U; i++)
+  for (uint32_t i = 0U; i < 80U; i++)
   {
-    _w[i] = (uint32_t)0U;
+    _w[i] = 0U;
   }
   uint32_t sta = h[0U];
   uint32_t stb = h[1U];
@@ -128,101 +120,69 @@ static void legacy_update(uint32_t *h, uint8_t *l)
   h[4U] = ste + he;
 }
 
-static void legacy_pad(uint64_t len, uint8_t *dst)
+static void pad(uint64_t len, uint8_t *dst)
 {
   uint8_t *dst1 = dst;
-  dst1[0U] = (uint8_t)0x80U;
-  uint8_t *dst2 = dst + (uint32_t)1U;
-  for
-  (uint32_t
-    i = (uint32_t)0U;
-    i
-    < ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U))) % (uint32_t)64U;
-    i++)
+  dst1[0U] = 0x80U;
+  uint8_t *dst2 = dst + 1U;
+  for (uint32_t i = 0U; i < (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U; i++)
   {
-    dst2[i] = (uint8_t)0U;
+    dst2[i] = 0U;
   }
-  uint8_t
-  *dst3 =
-    dst
-    +
-      (uint32_t)1U
-      +
-        ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(len % (uint64_t)(uint32_t)64U)))
-        % (uint32_t)64U;
-  store64_be(dst3, len << (uint32_t)3U);
+  uint8_t *dst3 = dst + 1U + (128U - (9U + (uint32_t)(len % (uint64_t)64U))) % 64U;
+  store64_be(dst3, len << 3U);
 }
 
-void Hacl_Hash_Core_SHA1_legacy_finish(uint32_t *s, uint8_t *dst)
+void Hacl_Hash_SHA1_finish(uint32_t *s, uint8_t *dst)
 {
-  KRML_MAYBE_FOR5(i,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
-    store32_be(dst + i * (uint32_t)4U, s[i]););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, store32_be(dst + i * 4U, s[i]););
 }
 
-void Hacl_Hash_SHA1_legacy_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
+void Hacl_Hash_SHA1_update_multi(uint32_t *s, uint8_t *blocks, uint32_t n_blocks)
 {
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
-    uint32_t sz = (uint32_t)64U;
+    uint32_t sz = 64U;
     uint8_t *block = blocks + sz * i;
-    legacy_update(s, block);
+    update(s, block);
   }
 }
 
 void
-Hacl_Hash_SHA1_legacy_update_last(
-  uint32_t *s,
-  uint64_t prev_len,
-  uint8_t *input,
-  uint32_t input_len
-)
+Hacl_Hash_SHA1_update_last(uint32_t *s, uint64_t prev_len, uint8_t *input, uint32_t input_len)
 {
-  uint32_t blocks_n = input_len / (uint32_t)64U;
-  uint32_t blocks_len = blocks_n * (uint32_t)64U;
+  uint32_t blocks_n = input_len / 64U;
+  uint32_t blocks_len = blocks_n * 64U;
   uint8_t *blocks = input;
   uint32_t rest_len = input_len - blocks_len;
   uint8_t *rest = input + blocks_len;
-  Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n);
+  Hacl_Hash_SHA1_update_multi(s, blocks, blocks_n);
   uint64_t total_input_len = prev_len + (uint64_t)input_len;
-  uint32_t
-  pad_len =
-    (uint32_t)1U
-    +
-      ((uint32_t)128U - ((uint32_t)9U + (uint32_t)(total_input_len % (uint64_t)(uint32_t)64U)))
-      % (uint32_t)64U
-    + (uint32_t)8U;
+  uint32_t pad_len = 1U + (128U - (9U + (uint32_t)(total_input_len % (uint64_t)64U))) % 64U + 8U;
   uint32_t tmp_len = rest_len + pad_len;
   uint8_t tmp_twoblocks[128U] = { 0U };
   uint8_t *tmp = tmp_twoblocks;
   uint8_t *tmp_rest = tmp;
   uint8_t *tmp_pad = tmp + rest_len;
   memcpy(tmp_rest, rest, rest_len * sizeof (uint8_t));
-  legacy_pad(total_input_len, tmp_pad);
-  Hacl_Hash_SHA1_legacy_update_multi(s, tmp, tmp_len / (uint32_t)64U);
+  pad(total_input_len, tmp_pad);
+  Hacl_Hash_SHA1_update_multi(s, tmp, tmp_len / 64U);
 }
 
-void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA1_hash_oneshot(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
-  uint32_t
-  s[5U] =
-    {
-      (uint32_t)0x67452301U, (uint32_t)0xefcdab89U, (uint32_t)0x98badcfeU, (uint32_t)0x10325476U,
-      (uint32_t)0xc3d2e1f0U
-    };
-  uint32_t blocks_n0 = input_len / (uint32_t)64U;
+  uint32_t s[5U] = { 0x67452301U, 0xefcdab89U, 0x98badcfeU, 0x10325476U, 0xc3d2e1f0U };
+  uint32_t blocks_n0 = input_len / 64U;
   uint32_t blocks_n1;
-  if (input_len % (uint32_t)64U == (uint32_t)0U && blocks_n0 > (uint32_t)0U)
+  if (input_len % 64U == 0U && blocks_n0 > 0U)
   {
-    blocks_n1 = blocks_n0 - (uint32_t)1U;
+    blocks_n1 = blocks_n0 - 1U;
   }
   else
   {
     blocks_n1 = blocks_n0;
   }
-  uint32_t blocks_len0 = blocks_n1 * (uint32_t)64U;
+  uint32_t blocks_len0 = blocks_n1 * 64U;
   uint8_t *blocks0 = input;
   uint32_t rest_len0 = input_len - blocks_len0;
   uint8_t *rest0 = input + blocks_len0;
@@ -231,75 +191,75 @@ void Hacl_Hash_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst
   uint8_t *blocks = blocks0;
   uint32_t rest_len = rest_len0;
   uint8_t *rest = rest0;
-  Hacl_Hash_SHA1_legacy_update_multi(s, blocks, blocks_n);
-  Hacl_Hash_SHA1_legacy_update_last(s, (uint64_t)blocks_len, rest, rest_len);
-  Hacl_Hash_Core_SHA1_legacy_finish(s, dst);
+  Hacl_Hash_SHA1_update_multi(s, blocks, blocks_n);
+  Hacl_Hash_SHA1_update_last(s, (uint64_t)blocks_len, rest, rest_len);
+  Hacl_Hash_SHA1_finish(s, output);
 }
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_create_in(void)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_malloc(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
-  Hacl_Hash_Core_SHA1_legacy_init(block_state);
+  Hacl_Hash_SHA1_init(block_state);
   return p;
 }
 
-void Hacl_Streaming_SHA1_legacy_init(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA1_reset(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
-  Hacl_Hash_Core_SHA1_legacy_init(block_state);
+  Hacl_Hash_SHA1_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 /**
 0 = success, 1 = max length exceeded
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
+Hacl_Hash_SHA1_update(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len)
 {
-  Hacl_Streaming_MD_state_32 s = *p;
+  Hacl_Streaming_MD_state_32 s = *state;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+  if ((uint64_t)chunk_len > 2305843009213693951ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (chunk_len <= 64U - sz)
   {
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
@@ -309,74 +269,74 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_SHA1_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data1, data1_len / (uint32_t)64U);
+    uint32_t n_blocks = (chunk_len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    Hacl_Hash_SHA1_update_multi(block_state1, data1, data1_len / 64U);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
+          .total_len = total_len1 + (uint64_t)chunk_len
         }
       );
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    uint32_t diff = 64U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state10 = s1.block_state;
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
     uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
@@ -385,114 +345,109 @@ Hacl_Streaming_SHA1_legacy_update(Hacl_Streaming_MD_state_32 *p, uint8_t *data,
           .total_len = total_len2
         }
       );
-    Hacl_Streaming_MD_state_32 s10 = *p;
+    Hacl_Streaming_MD_state_32 s10 = *state;
     uint32_t *block_state1 = s10.block_state;
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_Hash_SHA1_legacy_update_multi(block_state1, buf, (uint32_t)1U);
+      Hacl_Hash_SHA1_update_multi(block_state1, buf, 1U);
     }
     uint32_t ite;
     if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_Hash_SHA1_legacy_update_multi(block_state1, data11, data1_len / (uint32_t)64U);
+    uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    Hacl_Hash_SHA1_update_multi(block_state1, data1, data1_len / 64U);
     uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
         }
       );
   }
   return Hacl_Streaming_Types_Success;
 }
 
-void Hacl_Streaming_SHA1_legacy_finish(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+void Hacl_Hash_SHA1_digest(Hacl_Streaming_MD_state_32 *state, uint8_t *output)
 {
-  Hacl_Streaming_MD_state_32 scrut = *p;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[5U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)5U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 5U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_Hash_SHA1_legacy_update_multi(tmp_block_state, buf_multi, (uint32_t)0U);
+  Hacl_Hash_SHA1_update_multi(tmp_block_state, buf_multi, 0U);
   uint64_t prev_len_last = total_len - (uint64_t)r;
-  Hacl_Hash_SHA1_legacy_update_last(tmp_block_state, prev_len_last, buf_last, r);
-  Hacl_Hash_Core_SHA1_legacy_finish(tmp_block_state, dst);
+  Hacl_Hash_SHA1_update_last(tmp_block_state, prev_len_last, buf_last, r);
+  Hacl_Hash_SHA1_finish(tmp_block_state, output);
 }
 
-void Hacl_Streaming_SHA1_legacy_free(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA1_free(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
   KRML_HOST_FREE(block_state);
   KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
+  KRML_HOST_FREE(state);
 }
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_state_32 *s0)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA1_copy(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s0;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)5U, sizeof (uint32_t));
-  memcpy(block_state, block_state0, (uint32_t)5U * sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  memcpy(buf, buf0, 64U * sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(5U, sizeof (uint32_t));
+  memcpy(block_state, block_state0, 5U * sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_32
@@ -501,8 +456,8 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA1_legacy_copy(Hacl_Streaming_MD_st
   return p;
 }
 
-void Hacl_Streaming_SHA1_legacy_hash(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA1_hash(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
-  Hacl_Hash_SHA1_legacy_hash(input, input_len, dst);
+  Hacl_Hash_SHA1_hash_oneshot(output, input, input_len);
 }
 
diff --git a/src/msvc/Hacl_Hash_SHA2.c b/src/msvc/Hacl_Hash_SHA2.c
index c93c3616..995fe707 100644
--- a/src/msvc/Hacl_Hash_SHA2.c
+++ b/src/msvc/Hacl_Hash_SHA2.c
@@ -27,14 +27,14 @@
 
 #include "internal/Hacl_Krmllib.h"
 
-void Hacl_SHA2_Scalar32_sha256_init(uint32_t *hash)
+void Hacl_Hash_SHA2_sha256_init(uint32_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = hash;
-    uint32_t x = Hacl_Impl_SHA2_Generic_h256[i];
+    uint32_t x = Hacl_Hash_SHA2_h256[i];
     os[i] = x;);
 }
 
@@ -42,49 +42,49 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
 {
   uint32_t hash_old[8U] = { 0U };
   uint32_t ws[16U] = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(hash_old, hash, 8U * sizeof (uint32_t));
   uint8_t *b10 = b;
   uint32_t u = load32_be(b10);
   ws[0U] = u;
-  uint32_t u0 = load32_be(b10 + (uint32_t)4U);
+  uint32_t u0 = load32_be(b10 + 4U);
   ws[1U] = u0;
-  uint32_t u1 = load32_be(b10 + (uint32_t)8U);
+  uint32_t u1 = load32_be(b10 + 8U);
   ws[2U] = u1;
-  uint32_t u2 = load32_be(b10 + (uint32_t)12U);
+  uint32_t u2 = load32_be(b10 + 12U);
   ws[3U] = u2;
-  uint32_t u3 = load32_be(b10 + (uint32_t)16U);
+  uint32_t u3 = load32_be(b10 + 16U);
   ws[4U] = u3;
-  uint32_t u4 = load32_be(b10 + (uint32_t)20U);
+  uint32_t u4 = load32_be(b10 + 20U);
   ws[5U] = u4;
-  uint32_t u5 = load32_be(b10 + (uint32_t)24U);
+  uint32_t u5 = load32_be(b10 + 24U);
   ws[6U] = u5;
-  uint32_t u6 = load32_be(b10 + (uint32_t)28U);
+  uint32_t u6 = load32_be(b10 + 28U);
   ws[7U] = u6;
-  uint32_t u7 = load32_be(b10 + (uint32_t)32U);
+  uint32_t u7 = load32_be(b10 + 32U);
   ws[8U] = u7;
-  uint32_t u8 = load32_be(b10 + (uint32_t)36U);
+  uint32_t u8 = load32_be(b10 + 36U);
   ws[9U] = u8;
-  uint32_t u9 = load32_be(b10 + (uint32_t)40U);
+  uint32_t u9 = load32_be(b10 + 40U);
   ws[10U] = u9;
-  uint32_t u10 = load32_be(b10 + (uint32_t)44U);
+  uint32_t u10 = load32_be(b10 + 44U);
   ws[11U] = u10;
-  uint32_t u11 = load32_be(b10 + (uint32_t)48U);
+  uint32_t u11 = load32_be(b10 + 48U);
   ws[12U] = u11;
-  uint32_t u12 = load32_be(b10 + (uint32_t)52U);
+  uint32_t u12 = load32_be(b10 + 52U);
   ws[13U] = u12;
-  uint32_t u13 = load32_be(b10 + (uint32_t)56U);
+  uint32_t u13 = load32_be(b10 + 56U);
   ws[14U] = u13;
-  uint32_t u14 = load32_be(b10 + (uint32_t)60U);
+  uint32_t u14 = load32_be(b10 + 60U);
   ws[15U] = u14;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Hash_SHA2_k224_256[16U * i0 + i];
       uint32_t ws_t = ws[i];
       uint32_t a0 = hash[0U];
       uint32_t b0 = hash[1U];
@@ -98,20 +98,13 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
       uint32_t
       t1 =
         h02
-        +
-          ((e0 << (uint32_t)26U | e0 >> (uint32_t)6U)
-          ^
-            ((e0 << (uint32_t)21U | e0 >> (uint32_t)11U)
-            ^ (e0 << (uint32_t)7U | e0 >> (uint32_t)25U)))
+        + ((e0 << 26U | e0 >> 6U) ^ ((e0 << 21U | e0 >> 11U) ^ (e0 << 7U | e0 >> 25U)))
         + ((e0 & f0) ^ (~e0 & g0))
         + k_e_t
         + ws_t;
       uint32_t
       t2 =
-        ((a0 << (uint32_t)30U | a0 >> (uint32_t)2U)
-        ^
-          ((a0 << (uint32_t)19U | a0 >> (uint32_t)13U)
-          ^ (a0 << (uint32_t)10U | a0 >> (uint32_t)22U)))
+        ((a0 << 30U | a0 >> 2U) ^ ((a0 << 19U | a0 >> 13U) ^ (a0 << 10U | a0 >> 22U)))
         + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0)));
       uint32_t a1 = t1 + t2;
       uint32_t b1 = a0;
@@ -129,74 +122,63 @@ static inline void sha256_update(uint8_t *b, uint32_t *hash)
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         uint32_t t16 = ws[i];
-        uint32_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        uint32_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        uint32_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
-        uint32_t
-        s1 =
-          (t2 << (uint32_t)15U | t2 >> (uint32_t)17U)
-          ^ ((t2 << (uint32_t)13U | t2 >> (uint32_t)19U) ^ t2 >> (uint32_t)10U);
-        uint32_t
-        s0 =
-          (t15 << (uint32_t)25U | t15 >> (uint32_t)7U)
-          ^ ((t15 << (uint32_t)14U | t15 >> (uint32_t)18U) ^ t15 >> (uint32_t)3U);
+        uint32_t t15 = ws[(i + 1U) % 16U];
+        uint32_t t7 = ws[(i + 9U) % 16U];
+        uint32_t t2 = ws[(i + 14U) % 16U];
+        uint32_t s1 = (t2 << 15U | t2 >> 17U) ^ ((t2 << 13U | t2 >> 19U) ^ t2 >> 10U);
+        uint32_t s0 = (t15 << 25U | t15 >> 7U) ^ ((t15 << 14U | t15 >> 18U) ^ t15 >> 3U);
         ws[i] = s1 + t7 + s0 + t16;);
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = hash;
     uint32_t x = hash[i] + hash_old[i];
     os[i] = x;);
 }
 
-void Hacl_SHA2_Scalar32_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
+void Hacl_Hash_SHA2_sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b0 = b;
-    uint8_t *mb = b0 + i * (uint32_t)64U;
+    uint8_t *mb = b0 + i * 64U;
     sha256_update(mb, st);
   }
 }
 
 void
-Hacl_SHA2_Scalar32_sha256_update_last(
-  uint64_t totlen,
-  uint32_t len,
-  uint8_t *b,
-  uint32_t *hash
-)
+Hacl_Hash_SHA2_sha256_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *hash)
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[128U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b0 = b;
   memcpy(last, b0, len * sizeof (uint8_t));
-  last[len] = (uint8_t)0x80U;
-  memcpy(last + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last[len] = 0x80U;
+  memcpy(last + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)64U;
+  uint8_t *last10 = last + 64U;
   uint8_t *l0 = last00;
   uint8_t *l1 = last10;
   uint8_t *lb0 = l0;
@@ -204,65 +186,56 @@ Hacl_SHA2_Scalar32_sha256_update_last(
   uint8_t *last0 = lb0;
   uint8_t *last1 = lb1;
   sha256_update(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha256_update(last1, hash);
     return;
   }
 }
 
-void Hacl_SHA2_Scalar32_sha256_finish(uint32_t *st, uint8_t *h)
+void Hacl_Hash_SHA2_sha256_finish(uint32_t *st, uint8_t *h)
 {
   uint8_t hbuf[32U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_be(hbuf + i * (uint32_t)4U, st[i]););
-  memcpy(h, hbuf, (uint32_t)32U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(hbuf + i * 4U, st[i]););
+  memcpy(h, hbuf, 32U * sizeof (uint8_t));
 }
 
-void Hacl_SHA2_Scalar32_sha224_init(uint32_t *hash)
+void Hacl_Hash_SHA2_sha224_init(uint32_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = hash;
-    uint32_t x = Hacl_Impl_SHA2_Generic_h224[i];
+    uint32_t x = Hacl_Hash_SHA2_h224[i];
     os[i] = x;);
 }
 
 static inline void sha224_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st)
 {
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(len, b, st);
+  Hacl_Hash_SHA2_sha256_update_nblocks(len, b, st);
 }
 
-void
-Hacl_SHA2_Scalar32_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st)
+void Hacl_Hash_SHA2_sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st)
 {
-  Hacl_SHA2_Scalar32_sha256_update_last(totlen, len, b, st);
+  Hacl_Hash_SHA2_sha256_update_last(totlen, len, b, st);
 }
 
-void Hacl_SHA2_Scalar32_sha224_finish(uint32_t *st, uint8_t *h)
+void Hacl_Hash_SHA2_sha224_finish(uint32_t *st, uint8_t *h)
 {
   uint8_t hbuf[32U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_be(hbuf + i * (uint32_t)4U, st[i]););
-  memcpy(h, hbuf, (uint32_t)28U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_be(hbuf + i * 4U, st[i]););
+  memcpy(h, hbuf, 28U * sizeof (uint8_t));
 }
 
-void Hacl_SHA2_Scalar32_sha512_init(uint64_t *hash)
+void Hacl_Hash_SHA2_sha512_init(uint64_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = hash;
-    uint64_t x = Hacl_Impl_SHA2_Generic_h512[i];
+    uint64_t x = Hacl_Hash_SHA2_h512[i];
     os[i] = x;);
 }
 
@@ -270,49 +243,49 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
 {
   uint64_t hash_old[8U] = { 0U };
   uint64_t ws[16U] = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(hash_old, hash, 8U * sizeof (uint64_t));
   uint8_t *b10 = b;
   uint64_t u = load64_be(b10);
   ws[0U] = u;
-  uint64_t u0 = load64_be(b10 + (uint32_t)8U);
+  uint64_t u0 = load64_be(b10 + 8U);
   ws[1U] = u0;
-  uint64_t u1 = load64_be(b10 + (uint32_t)16U);
+  uint64_t u1 = load64_be(b10 + 16U);
   ws[2U] = u1;
-  uint64_t u2 = load64_be(b10 + (uint32_t)24U);
+  uint64_t u2 = load64_be(b10 + 24U);
   ws[3U] = u2;
-  uint64_t u3 = load64_be(b10 + (uint32_t)32U);
+  uint64_t u3 = load64_be(b10 + 32U);
   ws[4U] = u3;
-  uint64_t u4 = load64_be(b10 + (uint32_t)40U);
+  uint64_t u4 = load64_be(b10 + 40U);
   ws[5U] = u4;
-  uint64_t u5 = load64_be(b10 + (uint32_t)48U);
+  uint64_t u5 = load64_be(b10 + 48U);
   ws[6U] = u5;
-  uint64_t u6 = load64_be(b10 + (uint32_t)56U);
+  uint64_t u6 = load64_be(b10 + 56U);
   ws[7U] = u6;
-  uint64_t u7 = load64_be(b10 + (uint32_t)64U);
+  uint64_t u7 = load64_be(b10 + 64U);
   ws[8U] = u7;
-  uint64_t u8 = load64_be(b10 + (uint32_t)72U);
+  uint64_t u8 = load64_be(b10 + 72U);
   ws[9U] = u8;
-  uint64_t u9 = load64_be(b10 + (uint32_t)80U);
+  uint64_t u9 = load64_be(b10 + 80U);
   ws[10U] = u9;
-  uint64_t u10 = load64_be(b10 + (uint32_t)88U);
+  uint64_t u10 = load64_be(b10 + 88U);
   ws[11U] = u10;
-  uint64_t u11 = load64_be(b10 + (uint32_t)96U);
+  uint64_t u11 = load64_be(b10 + 96U);
   ws[12U] = u11;
-  uint64_t u12 = load64_be(b10 + (uint32_t)104U);
+  uint64_t u12 = load64_be(b10 + 104U);
   ws[13U] = u12;
-  uint64_t u13 = load64_be(b10 + (uint32_t)112U);
+  uint64_t u13 = load64_be(b10 + 112U);
   ws[14U] = u13;
-  uint64_t u14 = load64_be(b10 + (uint32_t)120U);
+  uint64_t u14 = load64_be(b10 + 120U);
   ws[15U] = u14;
   KRML_MAYBE_FOR5(i0,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
+    0U,
+    5U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint64_t k_t = Hacl_Hash_SHA2_k384_512[16U * i0 + i];
       uint64_t ws_t = ws[i];
       uint64_t a0 = hash[0U];
       uint64_t b0 = hash[1U];
@@ -326,20 +299,13 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
       uint64_t
       t1 =
         h02
-        +
-          ((e0 << (uint32_t)50U | e0 >> (uint32_t)14U)
-          ^
-            ((e0 << (uint32_t)46U | e0 >> (uint32_t)18U)
-            ^ (e0 << (uint32_t)23U | e0 >> (uint32_t)41U)))
+        + ((e0 << 50U | e0 >> 14U) ^ ((e0 << 46U | e0 >> 18U) ^ (e0 << 23U | e0 >> 41U)))
         + ((e0 & f0) ^ (~e0 & g0))
         + k_e_t
         + ws_t;
       uint64_t
       t2 =
-        ((a0 << (uint32_t)36U | a0 >> (uint32_t)28U)
-        ^
-          ((a0 << (uint32_t)30U | a0 >> (uint32_t)34U)
-          ^ (a0 << (uint32_t)25U | a0 >> (uint32_t)39U)))
+        ((a0 << 36U | a0 >> 28U) ^ ((a0 << 30U | a0 >> 34U) ^ (a0 << 25U | a0 >> 39U)))
         + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0)));
       uint64_t a1 = t1 + t2;
       uint64_t b1 = a0;
@@ -357,48 +323,42 @@ static inline void sha512_update(uint8_t *b, uint64_t *hash)
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)4U)
+    if (i0 < 4U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         uint64_t t16 = ws[i];
-        uint64_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        uint64_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        uint64_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
-        uint64_t
-        s1 =
-          (t2 << (uint32_t)45U | t2 >> (uint32_t)19U)
-          ^ ((t2 << (uint32_t)3U | t2 >> (uint32_t)61U) ^ t2 >> (uint32_t)6U);
-        uint64_t
-        s0 =
-          (t15 << (uint32_t)63U | t15 >> (uint32_t)1U)
-          ^ ((t15 << (uint32_t)56U | t15 >> (uint32_t)8U) ^ t15 >> (uint32_t)7U);
+        uint64_t t15 = ws[(i + 1U) % 16U];
+        uint64_t t7 = ws[(i + 9U) % 16U];
+        uint64_t t2 = ws[(i + 14U) % 16U];
+        uint64_t s1 = (t2 << 45U | t2 >> 19U) ^ ((t2 << 3U | t2 >> 61U) ^ t2 >> 6U);
+        uint64_t s0 = (t15 << 63U | t15 >> 1U) ^ ((t15 << 56U | t15 >> 8U) ^ t15 >> 7U);
         ws[i] = s1 + t7 + s0 + t16;);
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = hash;
     uint64_t x = hash[i] + hash_old[i];
     os[i] = x;);
 }
 
-void Hacl_SHA2_Scalar32_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
+void Hacl_Hash_SHA2_sha512_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
 {
-  uint32_t blocks = len / (uint32_t)128U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 128U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b0 = b;
-    uint8_t *mb = b0 + i * (uint32_t)128U;
+    uint8_t *mb = b0 + i * 128U;
     sha512_update(mb, st);
   }
 }
 
 void
-Hacl_SHA2_Scalar32_sha512_update_last(
+Hacl_Hash_SHA2_sha512_update_last(
   FStar_UInt128_uint128 totlen,
   uint32_t len,
   uint8_t *b,
@@ -406,25 +366,25 @@ Hacl_SHA2_Scalar32_sha512_update_last(
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+  if (len + 16U + 1U <= 128U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)128U;
+  uint32_t fin = blocks * 128U;
   uint8_t last[256U] = { 0U };
   uint8_t totlen_buf[16U] = { 0U };
-  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, 3U);
   store128_be(totlen_buf, total_len_bits);
   uint8_t *b0 = b;
   memcpy(last, b0, len * sizeof (uint8_t));
-  last[len] = (uint8_t)0x80U;
-  memcpy(last + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last[len] = 0x80U;
+  memcpy(last + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
+  uint8_t *last10 = last + 128U;
   uint8_t *l0 = last00;
   uint8_t *l1 = last10;
   uint8_t *lb0 = l0;
@@ -432,76 +392,68 @@ Hacl_SHA2_Scalar32_sha512_update_last(
   uint8_t *last0 = lb0;
   uint8_t *last1 = lb1;
   sha512_update(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha512_update(last1, hash);
     return;
   }
 }
 
-void Hacl_SHA2_Scalar32_sha512_finish(uint64_t *st, uint8_t *h)
+void Hacl_Hash_SHA2_sha512_finish(uint64_t *st, uint8_t *h)
 {
   uint8_t hbuf[64U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store64_be(hbuf + i * (uint32_t)8U, st[i]););
-  memcpy(h, hbuf, (uint32_t)64U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store64_be(hbuf + i * 8U, st[i]););
+  memcpy(h, hbuf, 64U * sizeof (uint8_t));
 }
 
-void Hacl_SHA2_Scalar32_sha384_init(uint64_t *hash)
+void Hacl_Hash_SHA2_sha384_init(uint64_t *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint64_t *os = hash;
-    uint64_t x = Hacl_Impl_SHA2_Generic_h384[i];
+    uint64_t x = Hacl_Hash_SHA2_h384[i];
     os[i] = x;);
 }
 
-void Hacl_SHA2_Scalar32_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
+void Hacl_Hash_SHA2_sha384_update_nblocks(uint32_t len, uint8_t *b, uint64_t *st)
 {
-  Hacl_SHA2_Scalar32_sha512_update_nblocks(len, b, st);
+  Hacl_Hash_SHA2_sha512_update_nblocks(len, b, st);
 }
 
 void
-Hacl_SHA2_Scalar32_sha384_update_last(
+Hacl_Hash_SHA2_sha384_update_last(
   FStar_UInt128_uint128 totlen,
   uint32_t len,
   uint8_t *b,
   uint64_t *st
 )
 {
-  Hacl_SHA2_Scalar32_sha512_update_last(totlen, len, b, st);
+  Hacl_Hash_SHA2_sha512_update_last(totlen, len, b, st);
 }
 
-void Hacl_SHA2_Scalar32_sha384_finish(uint64_t *st, uint8_t *h)
+void Hacl_Hash_SHA2_sha384_finish(uint64_t *st, uint8_t *h)
 {
   uint8_t hbuf[64U] = { 0U };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store64_be(hbuf + i * (uint32_t)8U, st[i]););
-  memcpy(h, hbuf, (uint32_t)48U * sizeof (uint8_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store64_be(hbuf + i * 8U, st[i]););
+  memcpy(h, hbuf, 48U * sizeof (uint8_t));
 }
 
 /**
 Allocate initial state for the SHA2_256 hash. The state is to be freed by
 calling `free_256`.
 */
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_256(void)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_256(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
-  Hacl_SHA2_Scalar32_sha256_init(block_state);
+  Hacl_Hash_SHA2_sha256_init(block_state);
   return p;
 }
 
@@ -511,16 +463,16 @@ The state is to be freed by calling `free_256`. Cloning the state this way is
 useful, for instance, if your control-flow diverges and you need to feed
 more (different) data into the hash in each branch.
 */
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_MD_state_32 *s0)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_copy_256(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s0;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
-  memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  memcpy(buf, buf0, 64U * sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
+  memcpy(block_state, block_state0, 8U * sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_32
@@ -532,54 +484,54 @@ Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_MD_state
 /**
 Reset an existing state to the initial hash state with empty data.
 */
-void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA2_reset_256(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
-  Hacl_SHA2_Scalar32_sha256_init(block_state);
+  Hacl_Hash_SHA2_sha256_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 static inline Hacl_Streaming_Types_error_code
-update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
+update_224_256(Hacl_Streaming_MD_state_32 *state, uint8_t *chunk, uint32_t chunk_len)
 {
-  Hacl_Streaming_MD_state_32 s = *p;
+  Hacl_Streaming_MD_state_32 s = *state;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len)
+  if ((uint64_t)chunk_len > 2305843009213693951ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)64U;
+    sz = 64U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    sz = (uint32_t)(total_len % (uint64_t)64U);
   }
-  if (len <= (uint32_t)64U - sz)
+  if (chunk_len <= 64U - sz)
   {
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
@@ -589,76 +541,74 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+      Hacl_Hash_SHA2_sha256_update_nblocks(64U, buf, block_state1);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / (uint32_t)64U * (uint32_t)64U,
-      data1,
-      block_state1);
+    uint32_t n_blocks = (chunk_len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    Hacl_Hash_SHA2_sha256_update_nblocks(data1_len / 64U * 64U, data1, block_state1);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
+          .total_len = total_len1 + (uint64_t)chunk_len
         }
       );
   }
   else
   {
-    uint32_t diff = (uint32_t)64U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_MD_state_32 s1 = *p;
+    uint32_t diff = 64U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Streaming_MD_state_32 s1 = *state;
     uint32_t *block_state10 = s1.block_state;
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)64U;
+      sz10 = 64U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
     }
     uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
     uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
@@ -667,55 +617,48 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
           .total_len = total_len2
         }
       );
-    Hacl_Streaming_MD_state_32 s10 = *p;
+    Hacl_Streaming_MD_state_32 s10 = *state;
     uint32_t *block_state1 = s10.block_state;
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)64U;
+      sz1 = 64U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)64U, buf, block_state1);
+      Hacl_Hash_SHA2_sha256_update_nblocks(64U, buf, block_state1);
     }
     uint32_t ite;
     if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
     {
-      ite = (uint32_t)64U;
+      ite = 64U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_SHA2_Scalar32_sha256_update_nblocks(data1_len / (uint32_t)64U * (uint32_t)64U,
-      data11,
-      block_state1);
+    uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    Hacl_Hash_SHA2_sha256_update_nblocks(data1_len / 64U * 64U, data1, block_state1);
     uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
     =
       (
         (Hacl_Streaming_MD_state_32){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
         }
       );
   }
@@ -725,209 +668,203 @@ update_224_256(Hacl_Streaming_MD_state_32 *p, uint8_t *data, uint32_t len)
 /**
 Feed an arbitrary amount of data into the hash. This function returns 0 for
 success, or 1 if the combined length of all of the data passed to `update_256`
-(since the last call to `init_256`) exceeds 2^61-1 bytes.
+(since the last call to `reset_256`) exceeds 2^61-1 bytes.
 
 This function is identical to the update function for SHA2_224.
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_256(
-  Hacl_Streaming_MD_state_32 *p,
+Hacl_Hash_SHA2_update_256(
+  Hacl_Streaming_MD_state_32 *state,
   uint8_t *input,
   uint32_t input_len
 )
 {
-  return update_224_256(p, input, input_len);
+  return update_224_256(state, input, input_len);
 }
 
 /**
-Write the resulting hash into `dst`, an array of 32 bytes. The state remains
-valid after a call to `finish_256`, meaning the user may feed more data into
-the hash via `update_256`. (The finish_256 function operates on an internal copy of
+Write the resulting hash into `output`, an array of 32 bytes. The state remains
+valid after a call to `digest_256`, meaning the user may feed more data into
+the hash via `update_256`. (The digest_256 function operates on an internal copy of
 the state and therefore does not invalidate the client-held state `state`.)
 */
-void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+void Hacl_Hash_SHA2_digest_256(Hacl_Streaming_MD_state_32 *state, uint8_t *output)
 {
-  Hacl_Streaming_MD_state_32 scrut = *p;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_SHA2_Scalar32_sha256_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  Hacl_Hash_SHA2_sha256_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
-  Hacl_SHA2_Scalar32_sha256_update_last(prev_len_last + (uint64_t)r,
-    r,
-    buf_last,
-    tmp_block_state);
-  Hacl_SHA2_Scalar32_sha256_finish(tmp_block_state, dst);
+  Hacl_Hash_SHA2_sha256_update_last(prev_len_last + (uint64_t)r, r, buf_last, tmp_block_state);
+  Hacl_Hash_SHA2_sha256_finish(tmp_block_state, output);
 }
 
 /**
-Free a state allocated with `create_in_256`.
+Free a state allocated with `malloc_256`.
 
 This function is identical to the free function for SHA2_224.
 */
-void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA2_free_256(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
   KRML_HOST_FREE(block_state);
   KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
+  KRML_HOST_FREE(state);
 }
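
Taken together, the renamed streaming entry points above (create_in_256 -> malloc_256, init_256 -> reset_256, finish_256 -> digest_256, with the module prefix moving from Hacl_Streaming_SHA2 to Hacl_Hash_SHA2) can be exercised as in the following minimal sketch. This is an editor-added illustration, not part of the generated sources or of this patch; it assumes the declarations are exported by the updated Hacl_Hash_SHA2.h header.

/* Illustrative sketch only: streaming SHA-256 with the renamed API. */
#include <stdint.h>
#include "Hacl_Hash_SHA2.h"

static void sketch_streaming_sha256(uint8_t *msg, uint32_t msg_len)
{
  uint8_t digest_prefix[32U] = { 0U };
  uint8_t digest_full[32U] = { 0U };
  /* malloc_256 replaces the former create_in_256 */
  Hacl_Streaming_MD_state_32 *st = Hacl_Hash_SHA2_malloc_256();
  /* update_256 returns 0 on success, 1 once the 2^61-1 byte limit is exceeded */
  (void)Hacl_Hash_SHA2_update_256(st, msg, msg_len);
  /* digest_256 (formerly finish_256) works on an internal copy, so ... */
  Hacl_Hash_SHA2_digest_256(st, digest_prefix);
  /* ... the state stays valid and more data can still be fed in */
  (void)Hacl_Hash_SHA2_update_256(st, msg, msg_len);
  Hacl_Hash_SHA2_digest_256(st, digest_full);
  /* copy_256 clones the state, e.g. before diverging control flow */
  Hacl_Streaming_MD_state_32 *st2 = Hacl_Hash_SHA2_copy_256(st);
  /* reset_256 replaces the former init_256 */
  Hacl_Hash_SHA2_reset_256(st);
  Hacl_Hash_SHA2_free_256(st);
  Hacl_Hash_SHA2_free_256(st2);
}
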
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 32 bytes.
+Hash `input`, of length `input_len`, into `output`, an array of 32 bytes.
 */
-void Hacl_Streaming_SHA2_hash_256(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA2_hash_256(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
   uint8_t *ib = input;
-  uint8_t *rb = dst;
+  uint8_t *rb = output;
   uint32_t st[8U] = { 0U };
-  Hacl_SHA2_Scalar32_sha256_init(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  Hacl_Hash_SHA2_sha256_init(st);
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
-  Hacl_SHA2_Scalar32_sha256_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  Hacl_Hash_SHA2_sha256_update_nblocks(input_len, ib, st);
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
-  Hacl_SHA2_Scalar32_sha256_update_last(len_, rem, lb, st);
-  Hacl_SHA2_Scalar32_sha256_finish(st, rb);
+  Hacl_Hash_SHA2_sha256_update_last(len_, rem, lb, st);
+  Hacl_Hash_SHA2_sha256_finish(st, rb);
 }
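
Note that the one-shot wrapper is not only renamed but also swaps its parameter order, from (input, input_len, dst) to (output, input, input_len). A hedged usage sketch (editor addition, same Hacl_Hash_SHA2.h assumption as above):

/* Illustrative sketch only: one-shot SHA-256 with the new output-first signature. */
#include <stdint.h>
#include "Hacl_Hash_SHA2.h"

static void sketch_oneshot_sha256(uint8_t *msg, uint32_t msg_len)
{
  uint8_t digest[32U] = { 0U };
  /* old: Hacl_Streaming_SHA2_hash_256(msg, msg_len, digest);
     new: the output buffer comes first */
  Hacl_Hash_SHA2_hash_256(digest, msg, msg_len);
}
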
 
-Hacl_Streaming_MD_state_32 *Hacl_Streaming_SHA2_create_in_224(void)
+Hacl_Streaming_MD_state_32 *Hacl_Hash_SHA2_malloc_224(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC(8U, sizeof (uint32_t));
   Hacl_Streaming_MD_state_32
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_32
   *p = (Hacl_Streaming_MD_state_32 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_32));
   p[0U] = s;
-  Hacl_SHA2_Scalar32_sha224_init(block_state);
+  Hacl_Hash_SHA2_sha224_init(block_state);
   return p;
 }
 
-void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_MD_state_32 *s)
+void Hacl_Hash_SHA2_reset_224(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_MD_state_32 scrut = *s;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint32_t *block_state = scrut.block_state;
-  Hacl_SHA2_Scalar32_sha224_init(block_state);
+  Hacl_Hash_SHA2_sha224_init(block_state);
   Hacl_Streaming_MD_state_32
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_224(
-  Hacl_Streaming_MD_state_32 *p,
+Hacl_Hash_SHA2_update_224(
+  Hacl_Streaming_MD_state_32 *state,
   uint8_t *input,
   uint32_t input_len
 )
 {
-  return update_224_256(p, input, input_len);
+  return update_224_256(state, input, input_len);
 }
 
 /**
-Write the resulting hash into `dst`, an array of 28 bytes. The state remains
-valid after a call to `finish_224`, meaning the user may feed more data into
+Write the resulting hash into `output`, an array of 28 bytes. The state remains
+valid after a call to `digest_224`, meaning the user may feed more data into
 the hash via `update_224`.
 */
-void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_MD_state_32 *p, uint8_t *dst)
+void Hacl_Hash_SHA2_digest_224(Hacl_Streaming_MD_state_32 *state, uint8_t *output)
 {
-  Hacl_Streaming_MD_state_32 scrut = *p;
+  Hacl_Streaming_MD_state_32 scrut = *state;
   uint32_t *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)64U;
+    r = 64U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
+    r = (uint32_t)(total_len % (uint64_t)64U);
   }
   uint8_t *buf_1 = buf_;
   uint32_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint32_t));
   uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 64U == 0U && r > 0U)
   {
-    ite = (uint32_t)64U;
+    ite = 64U;
   }
   else
   {
-    ite = r % (uint32_t)64U;
+    ite = r % 64U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  sha224_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  sha224_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
-  Hacl_SHA2_Scalar32_sha224_update_last(prev_len_last + (uint64_t)r,
-    r,
-    buf_last,
-    tmp_block_state);
-  Hacl_SHA2_Scalar32_sha224_finish(tmp_block_state, dst);
+  Hacl_Hash_SHA2_sha224_update_last(prev_len_last + (uint64_t)r, r, buf_last, tmp_block_state);
+  Hacl_Hash_SHA2_sha224_finish(tmp_block_state, output);
 }
 
-void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_MD_state_32 *p)
+void Hacl_Hash_SHA2_free_224(Hacl_Streaming_MD_state_32 *state)
 {
-  Hacl_Streaming_SHA2_free_256(p);
+  Hacl_Hash_SHA2_free_256(state);
 }
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 28 bytes.
+Hash `input`, of length `input_len`, into `output`, an array of 28 bytes.
 */
-void Hacl_Streaming_SHA2_hash_224(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA2_hash_224(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
   uint8_t *ib = input;
-  uint8_t *rb = dst;
+  uint8_t *rb = output;
   uint32_t st[8U] = { 0U };
-  Hacl_SHA2_Scalar32_sha224_init(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  Hacl_Hash_SHA2_sha224_init(st);
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha224_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
-  Hacl_SHA2_Scalar32_sha224_update_last(len_, rem, lb, st);
-  Hacl_SHA2_Scalar32_sha224_finish(st, rb);
+  Hacl_Hash_SHA2_sha224_update_last(len_, rem, lb, st);
+  Hacl_Hash_SHA2_sha224_finish(st, rb);
 }
 
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_512(void)
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_512(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64
   *p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64));
   p[0U] = s;
-  Hacl_SHA2_Scalar32_sha512_init(block_state);
+  Hacl_Hash_SHA2_sha512_init(block_state);
   return p;
 }
 
@@ -937,16 +874,16 @@ The state is to be freed by calling `free_512`. Cloning the state this way is
 useful, for instance, if your control-flow diverges and you need to feed
 more (different) data into the hash in each branch.
 */
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_copy_512(Hacl_Streaming_MD_state_64 *s0)
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_copy_512(Hacl_Streaming_MD_state_64 *state)
 {
-  Hacl_Streaming_MD_state_64 scrut = *s0;
+  Hacl_Streaming_MD_state_64 scrut = *state;
   uint64_t *block_state0 = scrut.block_state;
   uint8_t *buf0 = scrut.buf;
   uint64_t total_len0 = scrut.total_len;
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  memcpy(buf, buf0, (uint32_t)128U * sizeof (uint8_t));
-  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
-  memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  memcpy(buf, buf0, 128U * sizeof (uint8_t));
+  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
+  memcpy(block_state, block_state0, 8U * sizeof (uint64_t));
   Hacl_Streaming_MD_state_64
   s = { .block_state = block_state, .buf = buf, .total_len = total_len0 };
   Hacl_Streaming_MD_state_64
@@ -955,54 +892,54 @@ Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_copy_512(Hacl_Streaming_MD_state
   return p;
 }
 
-void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_MD_state_64 *s)
+void Hacl_Hash_SHA2_reset_512(Hacl_Streaming_MD_state_64 *state)
 {
-  Hacl_Streaming_MD_state_64 scrut = *s;
+  Hacl_Streaming_MD_state_64 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint64_t *block_state = scrut.block_state;
-  Hacl_SHA2_Scalar32_sha512_init(block_state);
+  Hacl_Hash_SHA2_sha512_init(block_state);
   Hacl_Streaming_MD_state_64
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 static inline Hacl_Streaming_Types_error_code
-update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
+update_384_512(Hacl_Streaming_MD_state_64 *state, uint8_t *chunk, uint32_t chunk_len)
 {
-  Hacl_Streaming_MD_state_64 s = *p;
+  Hacl_Streaming_MD_state_64 s = *state;
   uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)18446744073709551615U - total_len)
+  if ((uint64_t)chunk_len > 18446744073709551615ULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    sz = (uint32_t)128U;
+    sz = 128U;
   }
   else
   {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    sz = (uint32_t)(total_len % (uint64_t)128U);
   }
-  if (len <= (uint32_t)128U - sz)
+  if (chunk_len <= 128U - sz)
   {
-    Hacl_Streaming_MD_state_64 s1 = *p;
+    Hacl_Streaming_MD_state_64 s1 = *state;
     uint64_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
     =
       (
         (Hacl_Streaming_MD_state_64){
@@ -1012,76 +949,74 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
         }
       );
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
-    Hacl_Streaming_MD_state_64 s1 = *p;
+    Hacl_Streaming_MD_state_64 s1 = *state;
     uint64_t *block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, buf, block_state1);
+      Hacl_Hash_SHA2_sha512_update_nblocks(128U, buf, block_state1);
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)chunk_len % (uint64_t)128U == 0ULL && (uint64_t)chunk_len > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / (uint32_t)128U * (uint32_t)128U,
-      data1,
-      block_state1);
+    uint32_t n_blocks = (chunk_len - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    Hacl_Hash_SHA2_sha512_update_nblocks(data1_len / 128U * 128U, data1, block_state1);
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_64){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
+          .total_len = total_len1 + (uint64_t)chunk_len
         }
       );
   }
   else
   {
-    uint32_t diff = (uint32_t)128U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_MD_state_64 s1 = *p;
+    uint32_t diff = 128U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Streaming_MD_state_64 s1 = *state;
     uint64_t *block_state10 = s1.block_state;
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)128U == 0ULL && total_len10 > 0ULL)
     {
-      sz10 = (uint32_t)128U;
+      sz10 = 128U;
     }
     else
     {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
+      sz10 = (uint32_t)(total_len10 % (uint64_t)128U);
     }
     uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
     uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
+    *state
     =
       (
         (Hacl_Streaming_MD_state_64){
@@ -1090,55 +1025,48 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
           .total_len = total_len2
         }
       );
-    Hacl_Streaming_MD_state_64 s10 = *p;
+    Hacl_Streaming_MD_state_64 s10 = *state;
     uint64_t *block_state1 = s10.block_state;
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)128U == 0ULL && total_len1 > 0ULL)
     {
-      sz1 = (uint32_t)128U;
+      sz1 = 128U;
     }
     else
     {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
+      sz1 = (uint32_t)(total_len1 % (uint64_t)128U);
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
-      Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)128U, buf, block_state1);
+      Hacl_Hash_SHA2_sha512_update_nblocks(128U, buf, block_state1);
     }
     uint32_t ite;
     if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)128U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
+    ((uint64_t)(chunk_len - diff) % (uint64_t)128U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
     {
-      ite = (uint32_t)128U;
+      ite = 128U;
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)128U);
     }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_SHA2_Scalar32_sha512_update_nblocks(data1_len / (uint32_t)128U * (uint32_t)128U,
-      data11,
-      block_state1);
+    uint32_t n_blocks = (chunk_len - diff - ite) / 128U;
+    uint32_t data1_len = n_blocks * 128U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    Hacl_Hash_SHA2_sha512_update_nblocks(data1_len / 128U * 128U, data1, block_state1);
     uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
     =
       (
         (Hacl_Streaming_MD_state_64){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
         }
       );
   }
@@ -1148,198 +1076,198 @@ update_384_512(Hacl_Streaming_MD_state_64 *p, uint8_t *data, uint32_t len)
 /**
 Feed an arbitrary amount of data into the hash. This function returns 0 for
 success, or 1 if the combined length of all of the data passed to `update_512`
-(since the last call to `init_512`) exceeds 2^125-1 bytes.
+(since the last call to `reset_512`) exceeds 2^125-1 bytes.
 
 This function is identical to the update function for SHA2_384.
 */
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_512(
-  Hacl_Streaming_MD_state_64 *p,
+Hacl_Hash_SHA2_update_512(
+  Hacl_Streaming_MD_state_64 *state,
   uint8_t *input,
   uint32_t input_len
 )
 {
-  return update_384_512(p, input, input_len);
+  return update_384_512(state, input, input_len);
 }
 
 /**
-Write the resulting hash into `dst`, an array of 64 bytes. The state remains
-valid after a call to `finish_512`, meaning the user may feed more data into
-the hash via `update_512`. (The finish_512 function operates on an internal copy of
+Write the resulting hash into `output`, an array of 64 bytes. The state remains
+valid after a call to `digest_512`, meaning the user may feed more data into
+the hash via `update_512`. (The digest_512 function operates on an internal copy of
 the state and therefore does not invalidate the client-held state `state`.)
 */
-void Hacl_Streaming_SHA2_finish_512(Hacl_Streaming_MD_state_64 *p, uint8_t *dst)
+void Hacl_Hash_SHA2_digest_512(Hacl_Streaming_MD_state_64 *state, uint8_t *output)
 {
-  Hacl_Streaming_MD_state_64 scrut = *p;
+  Hacl_Streaming_MD_state_64 scrut = *state;
   uint64_t *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)128U;
+    r = 128U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    r = (uint32_t)(total_len % (uint64_t)128U);
   }
   uint8_t *buf_1 = buf_;
   uint64_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 128U == 0U && r > 0U)
   {
-    ite = (uint32_t)128U;
+    ite = 128U;
   }
   else
   {
-    ite = r % (uint32_t)128U;
+    ite = r % 128U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_SHA2_Scalar32_sha512_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  Hacl_Hash_SHA2_sha512_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
-  Hacl_SHA2_Scalar32_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
+  Hacl_Hash_SHA2_sha512_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
       FStar_UInt128_uint64_to_uint128((uint64_t)r)),
     r,
     buf_last,
     tmp_block_state);
-  Hacl_SHA2_Scalar32_sha512_finish(tmp_block_state, dst);
+  Hacl_Hash_SHA2_sha512_finish(tmp_block_state, output);
 }
 
 /**
-Free a state allocated with `create_in_512`.
+Free a state allocated with `malloc_512`.
 
 This function is identical to the free function for SHA2_384.
 */
-void Hacl_Streaming_SHA2_free_512(Hacl_Streaming_MD_state_64 *s)
+void Hacl_Hash_SHA2_free_512(Hacl_Streaming_MD_state_64 *state)
 {
-  Hacl_Streaming_MD_state_64 scrut = *s;
+  Hacl_Streaming_MD_state_64 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint64_t *block_state = scrut.block_state;
   KRML_HOST_FREE(block_state);
   KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
+  KRML_HOST_FREE(state);
 }
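
The SHA-512 (and SHA-384) streaming functions follow the same pattern on Hacl_Streaming_MD_state_64; only the length cap (2^125-1 bytes, tracked internally through FStar_UInt128) and the digest sizes differ. A brief editor-added sketch under the same header assumption:

/* Illustrative sketch only: streaming SHA-512 mirrors the 32-bit-state API. */
#include <stdint.h>
#include "Hacl_Hash_SHA2.h"

static void sketch_streaming_sha512(uint8_t *msg, uint32_t msg_len)
{
  uint8_t digest[64U] = { 0U };
  Hacl_Streaming_MD_state_64 *st = Hacl_Hash_SHA2_malloc_512();
  (void)Hacl_Hash_SHA2_update_512(st, msg, msg_len); /* 0 on success */
  Hacl_Hash_SHA2_digest_512(st, digest);             /* state stays valid */
  Hacl_Hash_SHA2_free_512(st);
}
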
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 64 bytes.
+Hash `input`, of length `input_len`, into `output`, an array of 64 bytes.
 */
-void Hacl_Streaming_SHA2_hash_512(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA2_hash_512(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
   uint8_t *ib = input;
-  uint8_t *rb = dst;
+  uint8_t *rb = output;
   uint64_t st[8U] = { 0U };
-  Hacl_SHA2_Scalar32_sha512_init(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  Hacl_Hash_SHA2_sha512_init(st);
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
-  Hacl_SHA2_Scalar32_sha512_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  Hacl_Hash_SHA2_sha512_update_nblocks(input_len, ib, st);
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
-  Hacl_SHA2_Scalar32_sha512_update_last(len_, rem, lb, st);
-  Hacl_SHA2_Scalar32_sha512_finish(st, rb);
+  Hacl_Hash_SHA2_sha512_update_last(len_, rem, lb, st);
+  Hacl_Hash_SHA2_sha512_finish(st, rb);
 }
 
-Hacl_Streaming_MD_state_64 *Hacl_Streaming_SHA2_create_in_384(void)
+Hacl_Streaming_MD_state_64 *Hacl_Hash_SHA2_malloc_384(void)
 {
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint64_t));
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(128U, sizeof (uint8_t));
+  uint64_t *block_state = (uint64_t *)KRML_HOST_CALLOC(8U, sizeof (uint64_t));
   Hacl_Streaming_MD_state_64
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
   Hacl_Streaming_MD_state_64
   *p = (Hacl_Streaming_MD_state_64 *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_MD_state_64));
   p[0U] = s;
-  Hacl_SHA2_Scalar32_sha384_init(block_state);
+  Hacl_Hash_SHA2_sha384_init(block_state);
   return p;
 }
 
-void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_MD_state_64 *s)
+void Hacl_Hash_SHA2_reset_384(Hacl_Streaming_MD_state_64 *state)
 {
-  Hacl_Streaming_MD_state_64 scrut = *s;
+  Hacl_Streaming_MD_state_64 scrut = *state;
   uint8_t *buf = scrut.buf;
   uint64_t *block_state = scrut.block_state;
-  Hacl_SHA2_Scalar32_sha384_init(block_state);
+  Hacl_Hash_SHA2_sha384_init(block_state);
   Hacl_Streaming_MD_state_64
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_SHA2_update_384(
-  Hacl_Streaming_MD_state_64 *p,
+Hacl_Hash_SHA2_update_384(
+  Hacl_Streaming_MD_state_64 *state,
   uint8_t *input,
   uint32_t input_len
 )
 {
-  return update_384_512(p, input, input_len);
+  return update_384_512(state, input, input_len);
 }
 
 /**
-Write the resulting hash into `dst`, an array of 48 bytes. The state remains
-valid after a call to `finish_384`, meaning the user may feed more data into
+Write the resulting hash into `output`, an array of 48 bytes. The state remains
+valid after a call to `digest_384`, meaning the user may feed more data into
 the hash via `update_384`.
 */
-void Hacl_Streaming_SHA2_finish_384(Hacl_Streaming_MD_state_64 *p, uint8_t *dst)
+void Hacl_Hash_SHA2_digest_384(Hacl_Streaming_MD_state_64 *state, uint8_t *output)
 {
-  Hacl_Streaming_MD_state_64 scrut = *p;
+  Hacl_Streaming_MD_state_64 scrut = *state;
   uint64_t *block_state = scrut.block_state;
   uint8_t *buf_ = scrut.buf;
   uint64_t total_len = scrut.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)128U == 0ULL && total_len > 0ULL)
   {
-    r = (uint32_t)128U;
+    r = 128U;
   }
   else
   {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
+    r = (uint32_t)(total_len % (uint64_t)128U);
   }
   uint8_t *buf_1 = buf_;
   uint64_t tmp_block_state[8U] = { 0U };
-  memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint64_t));
+  memcpy(tmp_block_state, block_state, 8U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % 128U == 0U && r > 0U)
   {
-    ite = (uint32_t)128U;
+    ite = 128U;
   }
   else
   {
-    ite = r % (uint32_t)128U;
+    ite = r % 128U;
   }
   uint8_t *buf_last = buf_1 + r - ite;
   uint8_t *buf_multi = buf_1;
-  Hacl_SHA2_Scalar32_sha384_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state);
+  Hacl_Hash_SHA2_sha384_update_nblocks(0U, buf_multi, tmp_block_state);
   uint64_t prev_len_last = total_len - (uint64_t)r;
-  Hacl_SHA2_Scalar32_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
+  Hacl_Hash_SHA2_sha384_update_last(FStar_UInt128_add(FStar_UInt128_uint64_to_uint128(prev_len_last),
       FStar_UInt128_uint64_to_uint128((uint64_t)r)),
     r,
     buf_last,
     tmp_block_state);
-  Hacl_SHA2_Scalar32_sha384_finish(tmp_block_state, dst);
+  Hacl_Hash_SHA2_sha384_finish(tmp_block_state, output);
 }
 
-void Hacl_Streaming_SHA2_free_384(Hacl_Streaming_MD_state_64 *p)
+void Hacl_Hash_SHA2_free_384(Hacl_Streaming_MD_state_64 *state)
 {
-  Hacl_Streaming_SHA2_free_512(p);
+  Hacl_Hash_SHA2_free_512(state);
 }
 
 /**
-Hash `input`, of len `input_len`, into `dst`, an array of 48 bytes.
+Hash `input`, of length `input_len`, into `output`, an array of 48 bytes.
 */
-void Hacl_Streaming_SHA2_hash_384(uint8_t *input, uint32_t input_len, uint8_t *dst)
+void Hacl_Hash_SHA2_hash_384(uint8_t *output, uint8_t *input, uint32_t input_len)
 {
   uint8_t *ib = input;
-  uint8_t *rb = dst;
+  uint8_t *rb = output;
   uint64_t st[8U] = { 0U };
-  Hacl_SHA2_Scalar32_sha384_init(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  Hacl_Hash_SHA2_sha384_init(st);
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
-  Hacl_SHA2_Scalar32_sha384_update_nblocks(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  Hacl_Hash_SHA2_sha384_update_nblocks(input_len, ib, st);
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b0 = ib;
   uint8_t *lb = b0 + input_len - rem1;
-  Hacl_SHA2_Scalar32_sha384_update_last(len_, rem, lb, st);
-  Hacl_SHA2_Scalar32_sha384_finish(st, rb);
+  Hacl_Hash_SHA2_sha384_update_last(len_, rem, lb, st);
+  Hacl_Hash_SHA2_sha384_finish(st, rb);
 }
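
As with SHA-256, the SHA-384 and SHA-512 one-shot wrappers now take the output buffer first. A short editor-added sketch, again assuming the updated Hacl_Hash_SHA2.h declarations:

/* Illustrative sketch only: one-shot SHA-384 and SHA-512 with the new signatures. */
#include <stdint.h>
#include "Hacl_Hash_SHA2.h"

static void sketch_oneshot_sha384_512(uint8_t *msg, uint32_t msg_len)
{
  uint8_t d384[48U] = { 0U };
  uint8_t d512[64U] = { 0U };
  Hacl_Hash_SHA2_hash_384(d384, msg, msg_len);
  Hacl_Hash_SHA2_hash_512(d512, msg, msg_len);
}
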
 
diff --git a/src/msvc/Hacl_Hash_SHA3.c b/src/msvc/Hacl_Hash_SHA3.c
index 19d13b1b..1b821d07 100644
--- a/src/msvc/Hacl_Hash_SHA3.c
+++ b/src/msvc/Hacl_Hash_SHA3.c
@@ -31,27 +31,27 @@ static uint32_t block_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)144U;
+        return 144U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)104U;
+        return 104U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)72U;
+        return 72U;
       }
     case Spec_Hash_Definitions_Shake128:
       {
-        return (uint32_t)168U;
+        return 168U;
       }
     case Spec_Hash_Definitions_Shake256:
       {
-        return (uint32_t)136U;
+        return 136U;
       }
     default:
       {
@@ -67,19 +67,19 @@ static uint32_t hash_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     default:
       {
@@ -97,10 +97,10 @@ Hacl_Hash_SHA3_update_multi_sha3(
   uint32_t n_blocks
 )
 {
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
     uint8_t *block = blocks + i * block_len(a);
-    Hacl_Impl_SHA3_absorb_inner(block_len(a), block, s);
+    Hacl_Hash_SHA3_absorb_inner(block_len(a), block, s);
   }
 }
 
@@ -115,139 +115,139 @@ Hacl_Hash_SHA3_update_last_sha3(
   uint8_t suffix;
   if (a == Spec_Hash_Definitions_Shake128 || a == Spec_Hash_Definitions_Shake256)
   {
-    suffix = (uint8_t)0x1fU;
+    suffix = 0x1fU;
   }
   else
   {
-    suffix = (uint8_t)0x06U;
+    suffix = 0x06U;
   }
   uint32_t len = block_len(a);
   if (input_len == len)
   {
-    Hacl_Impl_SHA3_absorb_inner(len, input, s);
+    Hacl_Hash_SHA3_absorb_inner(len, input, s);
     uint8_t lastBlock_[200U] = { 0U };
     uint8_t *lastBlock = lastBlock_;
-    memcpy(lastBlock, input + input_len, (uint32_t)0U * sizeof (uint8_t));
+    memcpy(lastBlock, input + input_len, 0U * sizeof (uint8_t));
     lastBlock[0U] = suffix;
-    Hacl_Impl_SHA3_loadState(len, lastBlock, s);
-    if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && (uint32_t)0U == len - (uint32_t)1U)
+    Hacl_Hash_SHA3_loadState(len, lastBlock, s);
+    if (!(((uint32_t)suffix & 0x80U) == 0U) && 0U == len - 1U)
     {
-      Hacl_Impl_SHA3_state_permute(s);
+      Hacl_Hash_SHA3_state_permute(s);
     }
     uint8_t nextBlock_[200U] = { 0U };
     uint8_t *nextBlock = nextBlock_;
-    nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U;
-    Hacl_Impl_SHA3_loadState(len, nextBlock, s);
-    Hacl_Impl_SHA3_state_permute(s);
+    nextBlock[len - 1U] = 0x80U;
+    Hacl_Hash_SHA3_loadState(len, nextBlock, s);
+    Hacl_Hash_SHA3_state_permute(s);
     return;
   }
   uint8_t lastBlock_[200U] = { 0U };
   uint8_t *lastBlock = lastBlock_;
   memcpy(lastBlock, input, input_len * sizeof (uint8_t));
   lastBlock[input_len] = suffix;
-  Hacl_Impl_SHA3_loadState(len, lastBlock, s);
-  if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && input_len == len - (uint32_t)1U)
+  Hacl_Hash_SHA3_loadState(len, lastBlock, s);
+  if (!(((uint32_t)suffix & 0x80U) == 0U) && input_len == len - 1U)
   {
-    Hacl_Impl_SHA3_state_permute(s);
+    Hacl_Hash_SHA3_state_permute(s);
   }
   uint8_t nextBlock_[200U] = { 0U };
   uint8_t *nextBlock = nextBlock_;
-  nextBlock[len - (uint32_t)1U] = (uint8_t)0x80U;
-  Hacl_Impl_SHA3_loadState(len, nextBlock, s);
-  Hacl_Impl_SHA3_state_permute(s);
+  nextBlock[len - 1U] = 0x80U;
+  Hacl_Hash_SHA3_loadState(len, nextBlock, s);
+  Hacl_Hash_SHA3_state_permute(s);
 }
 
 typedef struct hash_buf2_s
 {
-  Hacl_Streaming_Keccak_hash_buf fst;
-  Hacl_Streaming_Keccak_hash_buf snd;
+  Hacl_Hash_SHA3_hash_buf fst;
+  Hacl_Hash_SHA3_hash_buf snd;
 }
 hash_buf2;
 
-Spec_Hash_Definitions_hash_alg Hacl_Streaming_Keccak_get_alg(Hacl_Streaming_Keccak_state *s)
+Spec_Hash_Definitions_hash_alg Hacl_Hash_SHA3_get_alg(Hacl_Hash_SHA3_state_t *s)
 {
-  Hacl_Streaming_Keccak_hash_buf block_state = (*s).block_state;
+  Hacl_Hash_SHA3_hash_buf block_state = (*s).block_state;
   return block_state.fst;
 }
 
-Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_malloc(Spec_Hash_Definitions_hash_alg a)
+Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_hash_alg a)
 {
   KRML_CHECK_SIZE(sizeof (uint8_t), block_len(a));
   uint8_t *buf0 = (uint8_t *)KRML_HOST_CALLOC(block_len(a), sizeof (uint8_t));
-  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
-  Hacl_Streaming_Keccak_hash_buf block_state = { .fst = a, .snd = buf };
-  Hacl_Streaming_Keccak_state
-  s = { .block_state = block_state, .buf = buf0, .total_len = (uint64_t)(uint32_t)0U };
-  Hacl_Streaming_Keccak_state
-  *p = (Hacl_Streaming_Keccak_state *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_Keccak_state));
+  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
+  Hacl_Hash_SHA3_hash_buf block_state = { .fst = a, .snd = buf };
+  Hacl_Hash_SHA3_state_t
+  s = { .block_state = block_state, .buf = buf0, .total_len = (uint64_t)0U };
+  Hacl_Hash_SHA3_state_t
+  *p = (Hacl_Hash_SHA3_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_SHA3_state_t));
   p[0U] = s;
   uint64_t *s1 = block_state.snd;
-  memset(s1, 0U, (uint32_t)25U * sizeof (uint64_t));
+  memset(s1, 0U, 25U * sizeof (uint64_t));
   return p;
 }
 
-void Hacl_Streaming_Keccak_free(Hacl_Streaming_Keccak_state *s)
+void Hacl_Hash_SHA3_free(Hacl_Hash_SHA3_state_t *state)
 {
-  Hacl_Streaming_Keccak_state scrut = *s;
+  Hacl_Hash_SHA3_state_t scrut = *state;
   uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state;
-  uint64_t *s1 = block_state.snd;
-  KRML_HOST_FREE(s1);
-  KRML_HOST_FREE(buf);
+  Hacl_Hash_SHA3_hash_buf block_state = scrut.block_state;
+  uint64_t *s = block_state.snd;
   KRML_HOST_FREE(s);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
 }
 
-Hacl_Streaming_Keccak_state *Hacl_Streaming_Keccak_copy(Hacl_Streaming_Keccak_state *s0)
+Hacl_Hash_SHA3_state_t *Hacl_Hash_SHA3_copy(Hacl_Hash_SHA3_state_t *state)
 {
-  Hacl_Streaming_Keccak_state scrut0 = *s0;
-  Hacl_Streaming_Keccak_hash_buf block_state0 = scrut0.block_state;
+  Hacl_Hash_SHA3_state_t scrut0 = *state;
+  Hacl_Hash_SHA3_hash_buf block_state0 = scrut0.block_state;
   uint8_t *buf0 = scrut0.buf;
   uint64_t total_len0 = scrut0.total_len;
   Spec_Hash_Definitions_hash_alg i = block_state0.fst;
   KRML_CHECK_SIZE(sizeof (uint8_t), block_len(i));
   uint8_t *buf1 = (uint8_t *)KRML_HOST_CALLOC(block_len(i), sizeof (uint8_t));
   memcpy(buf1, buf0, block_len(i) * sizeof (uint8_t));
-  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
-  Hacl_Streaming_Keccak_hash_buf block_state = { .fst = i, .snd = buf };
+  uint64_t *buf = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
+  Hacl_Hash_SHA3_hash_buf block_state = { .fst = i, .snd = buf };
   hash_buf2 scrut = { .fst = block_state0, .snd = block_state };
   uint64_t *s_dst = scrut.snd.snd;
   uint64_t *s_src = scrut.fst.snd;
-  memcpy(s_dst, s_src, (uint32_t)25U * sizeof (uint64_t));
-  Hacl_Streaming_Keccak_state
+  memcpy(s_dst, s_src, 25U * sizeof (uint64_t));
+  Hacl_Hash_SHA3_state_t
   s = { .block_state = block_state, .buf = buf1, .total_len = total_len0 };
-  Hacl_Streaming_Keccak_state
-  *p = (Hacl_Streaming_Keccak_state *)KRML_HOST_MALLOC(sizeof (Hacl_Streaming_Keccak_state));
+  Hacl_Hash_SHA3_state_t
+  *p = (Hacl_Hash_SHA3_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_Hash_SHA3_state_t));
   p[0U] = s;
   return p;
 }
 
-void Hacl_Streaming_Keccak_reset(Hacl_Streaming_Keccak_state *s)
+void Hacl_Hash_SHA3_reset(Hacl_Hash_SHA3_state_t *state)
 {
-  Hacl_Streaming_Keccak_state scrut = *s;
+  Hacl_Hash_SHA3_state_t scrut = *state;
   uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state;
+  Hacl_Hash_SHA3_hash_buf block_state = scrut.block_state;
   Spec_Hash_Definitions_hash_alg i = block_state.fst;
-  KRML_HOST_IGNORE(i);
-  uint64_t *s1 = block_state.snd;
-  memset(s1, 0U, (uint32_t)25U * sizeof (uint64_t));
-  Hacl_Streaming_Keccak_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
+  KRML_MAYBE_UNUSED_VAR(i);
+  uint64_t *s = block_state.snd;
+  memset(s, 0U, 25U * sizeof (uint64_t));
+  Hacl_Hash_SHA3_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U };
+  state[0U] = tmp;
 }
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint32_t len)
+Hacl_Hash_SHA3_update(Hacl_Hash_SHA3_state_t *state, uint8_t *chunk, uint32_t chunk_len)
 {
-  Hacl_Streaming_Keccak_state s = *p;
-  Hacl_Streaming_Keccak_hash_buf block_state = s.block_state;
+  Hacl_Hash_SHA3_state_t s = *state;
+  Hacl_Hash_SHA3_hash_buf block_state = s.block_state;
   uint64_t total_len = s.total_len;
   Spec_Hash_Definitions_hash_alg i = block_state.fst;
-  if ((uint64_t)len > (uint64_t)0xFFFFFFFFFFFFFFFFU - total_len)
+  if ((uint64_t)chunk_len > 0xFFFFFFFFFFFFFFFFULL - total_len)
   {
     return Hacl_Streaming_Types_MaximumLengthExceeded;
   }
   uint32_t sz;
-  if (total_len % (uint64_t)block_len(i) == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)block_len(i) == 0ULL && total_len > 0ULL)
   {
     sz = block_len(i);
   }
@@ -255,14 +255,14 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
   {
     sz = (uint32_t)(total_len % (uint64_t)block_len(i));
   }
-  if (len <= block_len(i) - sz)
+  if (chunk_len <= block_len(i) - sz)
   {
-    Hacl_Streaming_Keccak_state s1 = *p;
-    Hacl_Streaming_Keccak_hash_buf block_state1 = s1.block_state;
+    Hacl_Hash_SHA3_state_t s1 = *state;
+    Hacl_Hash_SHA3_hash_buf block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
     {
       sz1 = block_len(i);
     }
@@ -271,26 +271,20 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
       sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
     }
     uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
     =
-      (
-        (Hacl_Streaming_Keccak_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2
-        }
-      );
+      ((Hacl_Hash_SHA3_state_t){ .block_state = block_state1, .buf = buf, .total_len = total_len2 });
   }
-  else if (sz == (uint32_t)0U)
+  else if (sz == 0U)
   {
-    Hacl_Streaming_Keccak_state s1 = *p;
-    Hacl_Streaming_Keccak_hash_buf block_state1 = s1.block_state;
+    Hacl_Hash_SHA3_state_t s1 = *state;
+    Hacl_Hash_SHA3_hash_buf block_state1 = s1.block_state;
     uint8_t *buf = s1.buf;
     uint64_t total_len1 = s1.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
     {
       sz1 = block_len(i);
     }
@@ -298,52 +292,52 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     {
       sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
       uint64_t *s2 = block_state1.snd;
       Hacl_Hash_SHA3_update_multi_sha3(a1, s2, buf, block_len(i) / block_len(a1));
     }
     uint32_t ite;
-    if ((uint64_t)len % (uint64_t)block_len(i) == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
+    if ((uint64_t)chunk_len % (uint64_t)block_len(i) == 0ULL && (uint64_t)chunk_len > 0ULL)
     {
       ite = block_len(i);
     }
     else
     {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)block_len(i));
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)block_len(i));
     }
-    uint32_t n_blocks = (len - ite) / block_len(i);
+    uint32_t n_blocks = (chunk_len - ite) / block_len(i);
     uint32_t data1_len = n_blocks * block_len(i);
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
     Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
     uint64_t *s2 = block_state1.snd;
     Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data1, data1_len / block_len(a1));
     uint8_t *dst = buf;
     memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
+    *state
     =
       (
-        (Hacl_Streaming_Keccak_state){
+        (Hacl_Hash_SHA3_state_t){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
+          .total_len = total_len1 + (uint64_t)chunk_len
         }
       );
   }
   else
   {
     uint32_t diff = block_len(i) - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Keccak_state s1 = *p;
-    Hacl_Streaming_Keccak_hash_buf block_state10 = s1.block_state;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_Hash_SHA3_state_t s1 = *state;
+    Hacl_Hash_SHA3_hash_buf block_state10 = s1.block_state;
     uint8_t *buf0 = s1.buf;
     uint64_t total_len10 = s1.total_len;
     uint32_t sz10;
-    if (total_len10 % (uint64_t)block_len(i) == (uint64_t)0U && total_len10 > (uint64_t)0U)
+    if (total_len10 % (uint64_t)block_len(i) == 0ULL && total_len10 > 0ULL)
     {
       sz10 = block_len(i);
     }
@@ -352,23 +346,23 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
       sz10 = (uint32_t)(total_len10 % (uint64_t)block_len(i));
     }
     uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
     uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
+    *state
     =
       (
-        (Hacl_Streaming_Keccak_state){
+        (Hacl_Hash_SHA3_state_t){
           .block_state = block_state10,
           .buf = buf0,
           .total_len = total_len2
         }
       );
-    Hacl_Streaming_Keccak_state s10 = *p;
-    Hacl_Streaming_Keccak_hash_buf block_state1 = s10.block_state;
+    Hacl_Hash_SHA3_state_t s10 = *state;
+    Hacl_Hash_SHA3_hash_buf block_state1 = s10.block_state;
     uint8_t *buf = s10.buf;
     uint64_t total_len1 = s10.total_len;
     uint32_t sz1;
-    if (total_len1 % (uint64_t)block_len(i) == (uint64_t)0U && total_len1 > (uint64_t)0U)
+    if (total_len1 % (uint64_t)block_len(i) == 0ULL && total_len1 > 0ULL)
     {
       sz1 = block_len(i);
     }
@@ -376,7 +370,7 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     {
       sz1 = (uint32_t)(total_len1 % (uint64_t)block_len(i));
     }
-    if (!(sz1 == (uint32_t)0U))
+    if (!(sz1 == 0U))
     {
       Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
       uint64_t *s2 = block_state1.snd;
@@ -385,35 +379,35 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
     uint32_t ite;
     if
     (
-      (uint64_t)(len - diff)
+      (uint64_t)(chunk_len - diff)
       % (uint64_t)block_len(i)
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
+      == 0ULL
+      && (uint64_t)(chunk_len - diff) > 0ULL
     )
     {
       ite = block_len(i);
     }
     else
     {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)block_len(i));
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)block_len(i));
     }
-    uint32_t n_blocks = (len - diff - ite) / block_len(i);
+    uint32_t n_blocks = (chunk_len - diff - ite) / block_len(i);
     uint32_t data1_len = n_blocks * block_len(i);
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
     Spec_Hash_Definitions_hash_alg a1 = block_state1.fst;
     uint64_t *s2 = block_state1.snd;
-    Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data11, data1_len / block_len(a1));
+    Hacl_Hash_SHA3_update_multi_sha3(a1, s2, data1, data1_len / block_len(a1));
     uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
     =
       (
-        (Hacl_Streaming_Keccak_state){
+        (Hacl_Hash_SHA3_state_t){
           .block_state = block_state1,
           .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff)
         }
       );
   }
@@ -421,19 +415,19 @@ Hacl_Streaming_Keccak_update(Hacl_Streaming_Keccak_state *p, uint8_t *data, uint
 }
 
 static void
-finish_(
+digest_(
   Spec_Hash_Definitions_hash_alg a,
-  Hacl_Streaming_Keccak_state *p,
-  uint8_t *dst,
+  Hacl_Hash_SHA3_state_t *state,
+  uint8_t *output,
   uint32_t l
 )
 {
-  Hacl_Streaming_Keccak_state scrut0 = *p;
-  Hacl_Streaming_Keccak_hash_buf block_state = scrut0.block_state;
+  Hacl_Hash_SHA3_state_t scrut0 = *state;
+  Hacl_Hash_SHA3_hash_buf block_state = scrut0.block_state;
   uint8_t *buf_ = scrut0.buf;
   uint64_t total_len = scrut0.total_len;
   uint32_t r;
-  if (total_len % (uint64_t)block_len(a) == (uint64_t)0U && total_len > (uint64_t)0U)
+  if (total_len % (uint64_t)block_len(a) == 0ULL && total_len > 0ULL)
   {
     r = block_len(a);
   }
@@ -443,13 +437,13 @@ finish_(
   }
   uint8_t *buf_1 = buf_;
   uint64_t buf[25U] = { 0U };
-  Hacl_Streaming_Keccak_hash_buf tmp_block_state = { .fst = a, .snd = buf };
+  Hacl_Hash_SHA3_hash_buf tmp_block_state = { .fst = a, .snd = buf };
   hash_buf2 scrut = { .fst = block_state, .snd = tmp_block_state };
   uint64_t *s_dst = scrut.snd.snd;
   uint64_t *s_src = scrut.fst.snd;
-  memcpy(s_dst, s_src, (uint32_t)25U * sizeof (uint64_t));
+  memcpy(s_dst, s_src, 25U * sizeof (uint64_t));
   uint32_t ite;
-  if (r % block_len(a) == (uint32_t)0U && r > (uint32_t)0U)
+  if (r % block_len(a) == 0U && r > 0U)
   {
     ite = block_len(a);
   }
@@ -461,7 +455,7 @@ finish_(
   uint8_t *buf_multi = buf_1;
   Spec_Hash_Definitions_hash_alg a1 = tmp_block_state.fst;
   uint64_t *s0 = tmp_block_state.snd;
-  Hacl_Hash_SHA3_update_multi_sha3(a1, s0, buf_multi, (uint32_t)0U / block_len(a1));
+  Hacl_Hash_SHA3_update_multi_sha3(a1, s0, buf_multi, 0U / block_len(a1));
   Spec_Hash_Definitions_hash_alg a10 = tmp_block_state.fst;
   uint64_t *s1 = tmp_block_state.snd;
   Hacl_Hash_SHA3_update_last_sha3(a10, s1, buf_last, r);
@@ -469,258 +463,182 @@ finish_(
   uint64_t *s = tmp_block_state.snd;
   if (a11 == Spec_Hash_Definitions_Shake128 || a11 == Spec_Hash_Definitions_Shake256)
   {
-    Hacl_Impl_SHA3_squeeze(s, block_len(a11), l, dst);
+    Hacl_Hash_SHA3_squeeze0(s, block_len(a11), l, output);
     return;
   }
-  Hacl_Impl_SHA3_squeeze(s, block_len(a11), hash_len(a11), dst);
+  Hacl_Hash_SHA3_squeeze0(s, block_len(a11), hash_len(a11), output);
 }
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_finish(Hacl_Streaming_Keccak_state *s, uint8_t *dst)
+Hacl_Hash_SHA3_digest(Hacl_Hash_SHA3_state_t *state, uint8_t *output)
 {
-  Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+  Spec_Hash_Definitions_hash_alg a1 = Hacl_Hash_SHA3_get_alg(state);
   if (a1 == Spec_Hash_Definitions_Shake128 || a1 == Spec_Hash_Definitions_Shake256)
   {
     return Hacl_Streaming_Types_InvalidAlgorithm;
   }
-  finish_(a1, s, dst, hash_len(a1));
+  digest_(a1, state, output, hash_len(a1));
   return Hacl_Streaming_Types_Success;
 }
 
 Hacl_Streaming_Types_error_code
-Hacl_Streaming_Keccak_squeeze(Hacl_Streaming_Keccak_state *s, uint8_t *dst, uint32_t l)
+Hacl_Hash_SHA3_squeeze(Hacl_Hash_SHA3_state_t *s, uint8_t *dst, uint32_t l)
 {
-  Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+  Spec_Hash_Definitions_hash_alg a1 = Hacl_Hash_SHA3_get_alg(s);
   if (!(a1 == Spec_Hash_Definitions_Shake128 || a1 == Spec_Hash_Definitions_Shake256))
   {
     return Hacl_Streaming_Types_InvalidAlgorithm;
   }
-  if (l == (uint32_t)0U)
+  if (l == 0U)
   {
     return Hacl_Streaming_Types_InvalidLength;
   }
-  finish_(a1, s, dst, l);
+  digest_(a1, s, dst, l);
   return Hacl_Streaming_Types_Success;
 }
 
-uint32_t Hacl_Streaming_Keccak_block_len(Hacl_Streaming_Keccak_state *s)
+uint32_t Hacl_Hash_SHA3_block_len(Hacl_Hash_SHA3_state_t *s)
 {
-  Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+  Spec_Hash_Definitions_hash_alg a1 = Hacl_Hash_SHA3_get_alg(s);
   return block_len(a1);
 }
 
-uint32_t Hacl_Streaming_Keccak_hash_len(Hacl_Streaming_Keccak_state *s)
+uint32_t Hacl_Hash_SHA3_hash_len(Hacl_Hash_SHA3_state_t *s)
 {
-  Spec_Hash_Definitions_hash_alg a1 = Hacl_Streaming_Keccak_get_alg(s);
+  Spec_Hash_Definitions_hash_alg a1 = Hacl_Hash_SHA3_get_alg(s);
   return hash_len(a1);
 }
 
-bool Hacl_Streaming_Keccak_is_shake(Hacl_Streaming_Keccak_state *s)
+bool Hacl_Hash_SHA3_is_shake(Hacl_Hash_SHA3_state_t *s)
 {
-  Spec_Hash_Definitions_hash_alg uu____0 = Hacl_Streaming_Keccak_get_alg(s);
+  Spec_Hash_Definitions_hash_alg uu____0 = Hacl_Hash_SHA3_get_alg(s);
   return uu____0 == Spec_Hash_Definitions_Shake128 || uu____0 == Spec_Hash_Definitions_Shake256;
 }
 
 void
-Hacl_SHA3_shake128_hacl(
+Hacl_Hash_SHA3_shake128_hacl(
   uint32_t inputByteLen,
   uint8_t *input,
   uint32_t outputByteLen,
   uint8_t *output
 )
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1344U,
-    (uint32_t)256U,
-    inputByteLen,
-    input,
-    (uint8_t)0x1FU,
-    outputByteLen,
-    output);
+  Hacl_Hash_SHA3_keccak(1344U, 256U, inputByteLen, input, 0x1FU, outputByteLen, output);
 }
 
 void
-Hacl_SHA3_shake256_hacl(
+Hacl_Hash_SHA3_shake256_hacl(
   uint32_t inputByteLen,
   uint8_t *input,
   uint32_t outputByteLen,
   uint8_t *output
 )
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1088U,
-    (uint32_t)512U,
-    inputByteLen,
-    input,
-    (uint8_t)0x1FU,
-    outputByteLen,
-    output);
+  Hacl_Hash_SHA3_keccak(1088U, 512U, inputByteLen, input, 0x1FU, outputByteLen, output);
 }
 
-void Hacl_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+void Hacl_Hash_SHA3_sha3_224(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1152U,
-    (uint32_t)448U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)28U,
-    output);
+  Hacl_Hash_SHA3_keccak(1152U, 448U, inputByteLen, input, 0x06U, 28U, output);
 }
 
-void Hacl_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+void Hacl_Hash_SHA3_sha3_256(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)1088U,
-    (uint32_t)512U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)32U,
-    output);
+  Hacl_Hash_SHA3_keccak(1088U, 512U, inputByteLen, input, 0x06U, 32U, output);
 }
 
-void Hacl_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+void Hacl_Hash_SHA3_sha3_384(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)832U,
-    (uint32_t)768U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)48U,
-    output);
+  Hacl_Hash_SHA3_keccak(832U, 768U, inputByteLen, input, 0x06U, 48U, output);
 }
 
-void Hacl_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
+void Hacl_Hash_SHA3_sha3_512(uint32_t inputByteLen, uint8_t *input, uint8_t *output)
 {
-  Hacl_Impl_SHA3_keccak((uint32_t)576U,
-    (uint32_t)1024U,
-    inputByteLen,
-    input,
-    (uint8_t)0x06U,
-    (uint32_t)64U,
-    output);
+  Hacl_Hash_SHA3_keccak(576U, 1024U, inputByteLen, input, 0x06U, 64U, output);
 }
 
 static const
 uint32_t
 keccak_rotc[24U] =
   {
-    (uint32_t)1U, (uint32_t)3U, (uint32_t)6U, (uint32_t)10U, (uint32_t)15U, (uint32_t)21U,
-    (uint32_t)28U, (uint32_t)36U, (uint32_t)45U, (uint32_t)55U, (uint32_t)2U, (uint32_t)14U,
-    (uint32_t)27U, (uint32_t)41U, (uint32_t)56U, (uint32_t)8U, (uint32_t)25U, (uint32_t)43U,
-    (uint32_t)62U, (uint32_t)18U, (uint32_t)39U, (uint32_t)61U, (uint32_t)20U, (uint32_t)44U
+    1U, 3U, 6U, 10U, 15U, 21U, 28U, 36U, 45U, 55U, 2U, 14U, 27U, 41U, 56U, 8U, 25U, 43U, 62U, 18U,
+    39U, 61U, 20U, 44U
   };
 
 static const
 uint32_t
 keccak_piln[24U] =
   {
-    (uint32_t)10U, (uint32_t)7U, (uint32_t)11U, (uint32_t)17U, (uint32_t)18U, (uint32_t)3U,
-    (uint32_t)5U, (uint32_t)16U, (uint32_t)8U, (uint32_t)21U, (uint32_t)24U, (uint32_t)4U,
-    (uint32_t)15U, (uint32_t)23U, (uint32_t)19U, (uint32_t)13U, (uint32_t)12U, (uint32_t)2U,
-    (uint32_t)20U, (uint32_t)14U, (uint32_t)22U, (uint32_t)9U, (uint32_t)6U, (uint32_t)1U
+    10U, 7U, 11U, 17U, 18U, 3U, 5U, 16U, 8U, 21U, 24U, 4U, 15U, 23U, 19U, 13U, 12U, 2U, 20U, 14U,
+    22U, 9U, 6U, 1U
   };
 
 static const
 uint64_t
 keccak_rndc[24U] =
   {
-    (uint64_t)0x0000000000000001U, (uint64_t)0x0000000000008082U, (uint64_t)0x800000000000808aU,
-    (uint64_t)0x8000000080008000U, (uint64_t)0x000000000000808bU, (uint64_t)0x0000000080000001U,
-    (uint64_t)0x8000000080008081U, (uint64_t)0x8000000000008009U, (uint64_t)0x000000000000008aU,
-    (uint64_t)0x0000000000000088U, (uint64_t)0x0000000080008009U, (uint64_t)0x000000008000000aU,
-    (uint64_t)0x000000008000808bU, (uint64_t)0x800000000000008bU, (uint64_t)0x8000000000008089U,
-    (uint64_t)0x8000000000008003U, (uint64_t)0x8000000000008002U, (uint64_t)0x8000000000000080U,
-    (uint64_t)0x000000000000800aU, (uint64_t)0x800000008000000aU, (uint64_t)0x8000000080008081U,
-    (uint64_t)0x8000000000008080U, (uint64_t)0x0000000080000001U, (uint64_t)0x8000000080008008U
+    0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, 0x8000000080008000ULL,
+    0x000000000000808bULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL,
+    0x000000000000008aULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
+    0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL,
+    0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800aULL, 0x800000008000000aULL,
+    0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
   };
 
-void Hacl_Impl_SHA3_state_permute(uint64_t *s)
+void Hacl_Hash_SHA3_state_permute(uint64_t *s)
 {
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)24U; i0++)
+  for (uint32_t i0 = 0U; i0 < 24U; i0++)
   {
     uint64_t _C[5U] = { 0U };
     KRML_MAYBE_FOR5(i,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      _C[i] =
-        s[i
-        + (uint32_t)0U]
-        ^
-          (s[i
-          + (uint32_t)5U]
-          ^ (s[i + (uint32_t)10U] ^ (s[i + (uint32_t)15U] ^ s[i + (uint32_t)20U]))););
+      0U,
+      5U,
+      1U,
+      _C[i] = s[i + 0U] ^ (s[i + 5U] ^ (s[i + 10U] ^ (s[i + 15U] ^ s[i + 20U]))););
     KRML_MAYBE_FOR5(i1,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      uint64_t uu____0 = _C[(i1 + (uint32_t)1U) % (uint32_t)5U];
-      uint64_t
-      _D =
-        _C[(i1 + (uint32_t)4U)
-        % (uint32_t)5U]
-        ^ (uu____0 << (uint32_t)1U | uu____0 >> (uint32_t)63U);
-      KRML_MAYBE_FOR5(i,
-        (uint32_t)0U,
-        (uint32_t)5U,
-        (uint32_t)1U,
-        s[i1 + (uint32_t)5U * i] = s[i1 + (uint32_t)5U * i] ^ _D;););
+      0U,
+      5U,
+      1U,
+      uint64_t uu____0 = _C[(i1 + 1U) % 5U];
+      uint64_t _D = _C[(i1 + 4U) % 5U] ^ (uu____0 << 1U | uu____0 >> 63U);
+      KRML_MAYBE_FOR5(i, 0U, 5U, 1U, s[i1 + 5U * i] = s[i1 + 5U * i] ^ _D;););
     uint64_t x = s[1U];
     uint64_t current = x;
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)24U; i++)
+    for (uint32_t i = 0U; i < 24U; i++)
     {
       uint32_t _Y = keccak_piln[i];
       uint32_t r = keccak_rotc[i];
       uint64_t temp = s[_Y];
       uint64_t uu____1 = current;
-      s[_Y] = uu____1 << r | uu____1 >> ((uint32_t)64U - r);
+      s[_Y] = uu____1 << r | uu____1 >> (64U - r);
       current = temp;
     }
     KRML_MAYBE_FOR5(i,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      uint64_t
-      v0 =
-        s[(uint32_t)0U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)1U + (uint32_t)5U * i] & s[(uint32_t)2U + (uint32_t)5U * i]);
-      uint64_t
-      v1 =
-        s[(uint32_t)1U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)2U + (uint32_t)5U * i] & s[(uint32_t)3U + (uint32_t)5U * i]);
-      uint64_t
-      v2 =
-        s[(uint32_t)2U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)3U + (uint32_t)5U * i] & s[(uint32_t)4U + (uint32_t)5U * i]);
-      uint64_t
-      v3 =
-        s[(uint32_t)3U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)4U + (uint32_t)5U * i] & s[(uint32_t)0U + (uint32_t)5U * i]);
-      uint64_t
-      v4 =
-        s[(uint32_t)4U
-        + (uint32_t)5U * i]
-        ^ (~s[(uint32_t)0U + (uint32_t)5U * i] & s[(uint32_t)1U + (uint32_t)5U * i]);
-      s[(uint32_t)0U + (uint32_t)5U * i] = v0;
-      s[(uint32_t)1U + (uint32_t)5U * i] = v1;
-      s[(uint32_t)2U + (uint32_t)5U * i] = v2;
-      s[(uint32_t)3U + (uint32_t)5U * i] = v3;
-      s[(uint32_t)4U + (uint32_t)5U * i] = v4;);
+      0U,
+      5U,
+      1U,
+      uint64_t v0 = s[0U + 5U * i] ^ (~s[1U + 5U * i] & s[2U + 5U * i]);
+      uint64_t v1 = s[1U + 5U * i] ^ (~s[2U + 5U * i] & s[3U + 5U * i]);
+      uint64_t v2 = s[2U + 5U * i] ^ (~s[3U + 5U * i] & s[4U + 5U * i]);
+      uint64_t v3 = s[3U + 5U * i] ^ (~s[4U + 5U * i] & s[0U + 5U * i]);
+      uint64_t v4 = s[4U + 5U * i] ^ (~s[0U + 5U * i] & s[1U + 5U * i]);
+      s[0U + 5U * i] = v0;
+      s[1U + 5U * i] = v1;
+      s[2U + 5U * i] = v2;
+      s[3U + 5U * i] = v3;
+      s[4U + 5U * i] = v4;);
     uint64_t c = keccak_rndc[i0];
     s[0U] = s[0U] ^ c;
   }
 }
 
-void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
+void Hacl_Hash_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
 {
   uint8_t block[200U] = { 0U };
   memcpy(block, input, rateInBytes * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+  for (uint32_t i = 0U; i < 25U; i++)
   {
-    uint64_t u = load64_le(block + i * (uint32_t)8U);
+    uint64_t u = load64_le(block + i * 8U);
     uint64_t x = u;
     s[i] = s[i] ^ x;
   }
@@ -729,18 +647,18 @@ void Hacl_Impl_SHA3_loadState(uint32_t rateInBytes, uint8_t *input, uint64_t *s)
 static void storeState(uint32_t rateInBytes, uint64_t *s, uint8_t *res)
 {
   uint8_t block[200U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+  for (uint32_t i = 0U; i < 25U; i++)
   {
     uint64_t sj = s[i];
-    store64_le(block + i * (uint32_t)8U, sj);
+    store64_le(block + i * 8U, sj);
   }
   memcpy(res, block, rateInBytes * sizeof (uint8_t));
 }
 
-void Hacl_Impl_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s)
+void Hacl_Hash_SHA3_absorb_inner(uint32_t rateInBytes, uint8_t *block, uint64_t *s)
 {
-  Hacl_Impl_SHA3_loadState(rateInBytes, block, s);
-  Hacl_Impl_SHA3_state_permute(s);
+  Hacl_Hash_SHA3_loadState(rateInBytes, block, s);
+  Hacl_Hash_SHA3_state_permute(s);
 }
 
 static void
@@ -754,30 +672,30 @@ absorb(
 {
   uint32_t n_blocks = inputByteLen / rateInBytes;
   uint32_t rem = inputByteLen % rateInBytes;
-  for (uint32_t i = (uint32_t)0U; i < n_blocks; i++)
+  for (uint32_t i = 0U; i < n_blocks; i++)
   {
     uint8_t *block = input + i * rateInBytes;
-    Hacl_Impl_SHA3_absorb_inner(rateInBytes, block, s);
+    Hacl_Hash_SHA3_absorb_inner(rateInBytes, block, s);
   }
   uint8_t *last = input + n_blocks * rateInBytes;
   uint8_t lastBlock_[200U] = { 0U };
   uint8_t *lastBlock = lastBlock_;
   memcpy(lastBlock, last, rem * sizeof (uint8_t));
   lastBlock[rem] = delimitedSuffix;
-  Hacl_Impl_SHA3_loadState(rateInBytes, lastBlock, s);
-  if (!((delimitedSuffix & (uint8_t)0x80U) == (uint8_t)0U) && rem == rateInBytes - (uint32_t)1U)
+  Hacl_Hash_SHA3_loadState(rateInBytes, lastBlock, s);
+  if (!(((uint32_t)delimitedSuffix & 0x80U) == 0U) && rem == rateInBytes - 1U)
   {
-    Hacl_Impl_SHA3_state_permute(s);
+    Hacl_Hash_SHA3_state_permute(s);
   }
   uint8_t nextBlock_[200U] = { 0U };
   uint8_t *nextBlock = nextBlock_;
-  nextBlock[rateInBytes - (uint32_t)1U] = (uint8_t)0x80U;
-  Hacl_Impl_SHA3_loadState(rateInBytes, nextBlock, s);
-  Hacl_Impl_SHA3_state_permute(s);
+  nextBlock[rateInBytes - 1U] = 0x80U;
+  Hacl_Hash_SHA3_loadState(rateInBytes, nextBlock, s);
+  Hacl_Hash_SHA3_state_permute(s);
 }
 
 void
-Hacl_Impl_SHA3_squeeze(
+Hacl_Hash_SHA3_squeeze0(
   uint64_t *s,
   uint32_t rateInBytes,
   uint32_t outputByteLen,
@@ -788,16 +706,16 @@ Hacl_Impl_SHA3_squeeze(
   uint32_t remOut = outputByteLen % rateInBytes;
   uint8_t *last = output + outputByteLen - remOut;
   uint8_t *blocks = output;
-  for (uint32_t i = (uint32_t)0U; i < outBlocks; i++)
+  for (uint32_t i = 0U; i < outBlocks; i++)
   {
     storeState(rateInBytes, s, blocks + i * rateInBytes);
-    Hacl_Impl_SHA3_state_permute(s);
+    Hacl_Hash_SHA3_state_permute(s);
   }
   storeState(remOut, s, last);
 }
 
 void
-Hacl_Impl_SHA3_keccak(
+Hacl_Hash_SHA3_keccak(
   uint32_t rate,
   uint32_t capacity,
   uint32_t inputByteLen,
@@ -807,10 +725,10 @@ Hacl_Impl_SHA3_keccak(
   uint8_t *output
 )
 {
-  KRML_HOST_IGNORE(capacity);
-  uint32_t rateInBytes = rate / (uint32_t)8U;
+  KRML_MAYBE_UNUSED_VAR(capacity);
+  uint32_t rateInBytes = rate / 8U;
   uint64_t s[25U] = { 0U };
   absorb(s, rateInBytes, inputByteLen, input, delimitedSuffix);
-  Hacl_Impl_SHA3_squeeze(s, rateInBytes, outputByteLen, output);
+  Hacl_Hash_SHA3_squeeze0(s, rateInBytes, outputByteLen, output);
 }
 
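The file above renames the streaming Keccak API (malloc/update/digest/squeeze/free) and the one-shot Hacl_Hash_SHA3_* wrappers. The following sketch exercises the renamed SHA3-256 paths using only the signatures visible in the hunks; it is illustrative, not part of the patch, and the include name "Hacl_Hash_SHA3.h" is an assumption. If msg is the concatenation of part1 and part2, the streaming and one-shot digests agree.

#include <stdint.h>
#include "Hacl_Hash_SHA3.h"   /* assumed header declaring the renamed SHA3 API */

/* Streaming SHA3-256 over two chunks with the renamed state type, followed
 * by the equivalent one-shot call for comparison. */
static int demo_sha3_256(uint8_t *part1, uint32_t len1,
                         uint8_t *part2, uint32_t len2,
                         uint8_t *msg, uint32_t msg_len)
{
  uint8_t digest_streaming[32U] = { 0U };
  uint8_t digest_oneshot[32U] = { 0U };
  Hacl_Hash_SHA3_state_t *st = Hacl_Hash_SHA3_malloc(Spec_Hash_Definitions_SHA3_256);
  /* update reports MaximumLengthExceeded once total_len would exceed 2^64 - 1 bytes */
  if (Hacl_Hash_SHA3_update(st, part1, len1) != Hacl_Streaming_Types_Success
    || Hacl_Hash_SHA3_update(st, part2, len2) != Hacl_Streaming_Types_Success)
  {
    Hacl_Hash_SHA3_free(st);
    return -1;
  }
  /* digest rejects SHAKE algorithms; Hacl_Hash_SHA3_squeeze covers those */
  Hacl_Streaming_Types_error_code err = Hacl_Hash_SHA3_digest(st, digest_streaming);
  Hacl_Hash_SHA3_free(st);   /* frees the block buffer, the Keccak state, and the struct */
  /* one-shot wrapper: input length first, then input, then output */
  Hacl_Hash_SHA3_sha3_256(msg_len, msg, digest_oneshot);
  return err == Hacl_Streaming_Types_Success ? 0 : -1;
}
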
diff --git a/src/msvc/Hacl_K256_ECDSA.c b/src/msvc/Hacl_K256_ECDSA.c
index c5dda43f..f9bf31ed 100644
--- a/src/msvc/Hacl_K256_ECDSA.c
+++ b/src/msvc/Hacl_K256_ECDSA.c
@@ -35,27 +35,27 @@ bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res)
 {
   uint64_t *a0 = a;
   uint64_t *res0 = res;
-  uint64_t c0 = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < bLen / (uint32_t)4U; i++)
+  uint64_t c0 = 0ULL;
+  for (uint32_t i = 0U; i < bLen / 4U; i++)
   {
-    uint64_t t1 = a0[(uint32_t)4U * i];
-    uint64_t t20 = b[(uint32_t)4U * i];
-    uint64_t *res_i0 = res0 + (uint32_t)4U * i;
+    uint64_t t1 = a0[4U * i];
+    uint64_t t20 = b[4U * i];
+    uint64_t *res_i0 = res0 + 4U * i;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a0[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * i + (uint32_t)1U];
-    uint64_t *res_i1 = res0 + (uint32_t)4U * i + (uint32_t)1U;
+    uint64_t t10 = a0[4U * i + 1U];
+    uint64_t t21 = b[4U * i + 1U];
+    uint64_t *res_i1 = res0 + 4U * i + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a0[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * i + (uint32_t)2U];
-    uint64_t *res_i2 = res0 + (uint32_t)4U * i + (uint32_t)2U;
+    uint64_t t11 = a0[4U * i + 2U];
+    uint64_t t22 = b[4U * i + 2U];
+    uint64_t *res_i2 = res0 + 4U * i + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a0[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * i + (uint32_t)3U];
-    uint64_t *res_i = res0 + (uint32_t)4U * i + (uint32_t)3U;
+    uint64_t t12 = a0[4U * i + 3U];
+    uint64_t t2 = b[4U * i + 3U];
+    uint64_t *res_i = res0 + 4U * i + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
-  for (uint32_t i = bLen / (uint32_t)4U * (uint32_t)4U; i < bLen; i++)
+  for (uint32_t i = bLen / 4U * 4U; i < bLen; i++)
   {
     uint64_t t1 = a0[i];
     uint64_t t2 = b[i];
@@ -68,26 +68,26 @@ bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res)
     uint64_t *a1 = a + bLen;
     uint64_t *res1 = res + bLen;
     uint64_t c = c00;
-    for (uint32_t i = (uint32_t)0U; i < (aLen - bLen) / (uint32_t)4U; i++)
+    for (uint32_t i = 0U; i < (aLen - bLen) / 4U; i++)
     {
-      uint64_t t1 = a1[(uint32_t)4U * i];
-      uint64_t *res_i0 = res1 + (uint32_t)4U * i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i0);
-      uint64_t t10 = a1[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res1 + (uint32_t)4U * i + (uint32_t)1U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, (uint64_t)0U, res_i1);
-      uint64_t t11 = a1[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res1 + (uint32_t)4U * i + (uint32_t)2U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, (uint64_t)0U, res_i2);
-      uint64_t t12 = a1[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res1 + (uint32_t)4U * i + (uint32_t)3U;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, (uint64_t)0U, res_i);
+      uint64_t t1 = a1[4U * i];
+      uint64_t *res_i0 = res1 + 4U * i;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i0);
+      uint64_t t10 = a1[4U * i + 1U];
+      uint64_t *res_i1 = res1 + 4U * i + 1U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, 0ULL, res_i1);
+      uint64_t t11 = a1[4U * i + 2U];
+      uint64_t *res_i2 = res1 + 4U * i + 2U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, 0ULL, res_i2);
+      uint64_t t12 = a1[4U * i + 3U];
+      uint64_t *res_i = res1 + 4U * i + 3U;
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, 0ULL, res_i);
     }
-    for (uint32_t i = (aLen - bLen) / (uint32_t)4U * (uint32_t)4U; i < aLen - bLen; i++)
+    for (uint32_t i = (aLen - bLen) / 4U * 4U; i < aLen - bLen; i++)
     {
       uint64_t t1 = a1[i];
       uint64_t *res_i = res1 + i;
-      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i);
+      c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i);
     }
     uint64_t c1 = c;
     return c1;
@@ -97,23 +97,23 @@ bn_add(uint32_t aLen, uint64_t *a, uint32_t bLen, uint64_t *b, uint64_t *res)
 
 static uint64_t add4(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   return c;
@@ -121,52 +121,52 @@ static uint64_t add4(uint64_t *a, uint64_t *b, uint64_t *res)
 
 static void add_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x;);
@@ -174,53 +174,53 @@ static void add_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 
 static void sub_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = a[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = b[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = a[4U * 0U];
+    uint64_t t20 = b[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = a[4U * 0U + 1U];
+    uint64_t t21 = b[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = a[4U * 0U + 2U];
+    uint64_t t22 = b[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = b[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = a[4U * 0U + 3U];
+    uint64_t t2 = b[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x;);
@@ -228,59 +228,59 @@ static void sub_mod4(uint64_t *n, uint64_t *a, uint64_t *b, uint64_t *res)
 
 static void mul4(uint64_t *a, uint64_t *b, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = b[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = a[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = a[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
     uint64_t r = c;
-    res[(uint32_t)4U + i0] = r;);
+    res[4U + i0] = r;);
 }
 
 static void sqr4(uint64_t *a, uint64_t *res)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = a;
     uint64_t a_j = a[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -288,30 +288,30 @@ static void sqr4(uint64_t *a, uint64_t *res)
     }
     uint64_t r = c;
     res[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(a[i], a[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline uint64_t is_qelem_zero(uint64_t *f)
 {
   uint64_t bn_zero[4U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(f[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
@@ -325,33 +325,33 @@ static inline bool is_qelem_zero_vartime(uint64_t *f)
   uint64_t f1 = f[1U];
   uint64_t f2 = f[2U];
   uint64_t f3 = f[3U];
-  return f0 == (uint64_t)0U && f1 == (uint64_t)0U && f2 == (uint64_t)0U && f3 == (uint64_t)0U;
+  return f0 == 0ULL && f1 == 0ULL && f2 == 0ULL && f3 == 0ULL;
 }
 
 static inline uint64_t load_qelem_check(uint64_t *f, uint8_t *b)
 {
   uint64_t n[4U] = { 0U };
-  n[0U] = (uint64_t)0xbfd25e8cd0364141U;
-  n[1U] = (uint64_t)0xbaaedce6af48a03bU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0xffffffffffffffffU;
+  n[0U] = 0xbfd25e8cd0364141ULL;
+  n[1U] = 0xbaaedce6af48a03bULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0xffffffffffffffffULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = f;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
   uint64_t is_zero = is_qelem_zero(f);
-  uint64_t acc = (uint64_t)0U;
+  uint64_t acc = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t beq = FStar_UInt64_eq_mask(f[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(f[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U))););
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL))););
   uint64_t is_lt_q = acc;
   return ~is_zero & is_lt_q;
 }
@@ -359,11 +359,11 @@ static inline uint64_t load_qelem_check(uint64_t *f, uint8_t *b)
 static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b)
 {
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = f;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
   bool is_zero = is_qelem_zero_vartime(f);
@@ -372,29 +372,29 @@ static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b)
   uint64_t a2 = f[2U];
   uint64_t a3 = f[3U];
   bool is_lt_q_b;
-  if (a3 < (uint64_t)0xffffffffffffffffU)
+  if (a3 < 0xffffffffffffffffULL)
   {
     is_lt_q_b = true;
   }
-  else if (a2 < (uint64_t)0xfffffffffffffffeU)
+  else if (a2 < 0xfffffffffffffffeULL)
   {
     is_lt_q_b = true;
   }
-  else if (a2 > (uint64_t)0xfffffffffffffffeU)
+  else if (a2 > 0xfffffffffffffffeULL)
   {
     is_lt_q_b = false;
   }
-  else if (a1 < (uint64_t)0xbaaedce6af48a03bU)
+  else if (a1 < 0xbaaedce6af48a03bULL)
   {
     is_lt_q_b = true;
   }
-  else if (a1 > (uint64_t)0xbaaedce6af48a03bU)
+  else if (a1 > 0xbaaedce6af48a03bULL)
   {
     is_lt_q_b = false;
   }
   else
   {
-    is_lt_q_b = a0 < (uint64_t)0xbfd25e8cd0364141U;
+    is_lt_q_b = a0 < 0xbfd25e8cd0364141ULL;
   }
   return !is_zero && is_lt_q_b;
 }
@@ -402,16 +402,16 @@ static inline bool load_qelem_vartime(uint64_t *f, uint8_t *b)
 static inline void modq_short(uint64_t *out, uint64_t *a)
 {
   uint64_t tmp[4U] = { 0U };
-  tmp[0U] = (uint64_t)0x402da1732fc9bebfU;
-  tmp[1U] = (uint64_t)0x4551231950b75fc4U;
-  tmp[2U] = (uint64_t)0x1U;
-  tmp[3U] = (uint64_t)0x0U;
+  tmp[0U] = 0x402da1732fc9bebfULL;
+  tmp[1U] = 0x4551231950b75fc4ULL;
+  tmp[2U] = 0x1ULL;
+  tmp[3U] = 0x0ULL;
   uint64_t c = add4(a, tmp, out);
-  uint64_t mask = (uint64_t)0U - c;
+  uint64_t mask = 0ULL - c;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = out;
     uint64_t x = (mask & out[i]) | (~mask & a[i]);
     os[i] = x;);
@@ -421,35 +421,31 @@ static inline void load_qelem_modq(uint64_t *f, uint8_t *b)
 {
   uint64_t tmp[4U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = f;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
-  memcpy(tmp, f, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, f, 4U * sizeof (uint64_t));
   modq_short(f, tmp);
 }
 
 static inline void store_qelem(uint8_t *b, uint64_t *f)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(b + i * (uint32_t)8U, f[(uint32_t)4U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(b + i * 8U, f[4U - i - 1U]););
 }
 
 static inline void qadd(uint64_t *out, uint64_t *f1, uint64_t *f2)
 {
   uint64_t n[4U] = { 0U };
-  n[0U] = (uint64_t)0xbfd25e8cd0364141U;
-  n[1U] = (uint64_t)0xbaaedce6af48a03bU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0xffffffffffffffffU;
+  n[0U] = 0xbfd25e8cd0364141ULL;
+  n[1U] = 0xbaaedce6af48a03bULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0xffffffffffffffffULL;
   add_mod4(n, f1, f2, out);
 }
 
@@ -463,33 +459,33 @@ mul_pow2_256_minus_q_add(
   uint64_t *res
 )
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t), len + (uint32_t)2U);
-  uint64_t *tmp = (uint64_t *)alloca((len + (uint32_t)2U) * sizeof (uint64_t));
-  memset(tmp, 0U, (len + (uint32_t)2U) * sizeof (uint64_t));
-  memset(tmp, 0U, (len + (uint32_t)2U) * sizeof (uint64_t));
+  KRML_CHECK_SIZE(sizeof (uint64_t), len + 2U);
+  uint64_t *tmp = (uint64_t *)alloca((len + 2U) * sizeof (uint64_t));
+  memset(tmp, 0U, (len + 2U) * sizeof (uint64_t));
+  memset(tmp, 0U, (len + 2U) * sizeof (uint64_t));
   KRML_MAYBE_FOR2(i0,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint64_t bj = t01[i0];
     uint64_t *res_j = tmp + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < len / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < len / 4U; i++)
     {
-      uint64_t a_i = a[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = a[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = a[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = a[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = a[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = a[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = a[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = a[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
-    for (uint32_t i = len / (uint32_t)4U * (uint32_t)4U; i < len; i++)
+    for (uint32_t i = len / 4U * 4U; i < len; i++)
     {
       uint64_t a_i = a[i];
       uint64_t *res_i = res_j + i;
@@ -497,9 +493,9 @@ mul_pow2_256_minus_q_add(
     }
     uint64_t r = c;
     tmp[len + i0] = r;);
-  memcpy(res + (uint32_t)2U, a, len * sizeof (uint64_t));
-  KRML_HOST_IGNORE(bn_add(resLen, res, len + (uint32_t)2U, tmp, res));
-  uint64_t c = bn_add(resLen, res, (uint32_t)4U, e, res);
+  memcpy(res + 2U, a, len * sizeof (uint64_t));
+  bn_add(resLen, res, len + 2U, tmp, res);
+  uint64_t c = bn_add(resLen, res, 4U, e, res);
   return c;
 }
 
@@ -507,34 +503,23 @@ static inline void modq(uint64_t *out, uint64_t *a)
 {
   uint64_t r[4U] = { 0U };
   uint64_t tmp[4U] = { 0U };
-  tmp[0U] = (uint64_t)0x402da1732fc9bebfU;
-  tmp[1U] = (uint64_t)0x4551231950b75fc4U;
-  tmp[2U] = (uint64_t)0x1U;
-  tmp[3U] = (uint64_t)0x0U;
+  tmp[0U] = 0x402da1732fc9bebfULL;
+  tmp[1U] = 0x4551231950b75fc4ULL;
+  tmp[2U] = 0x1ULL;
+  tmp[3U] = 0x0ULL;
   uint64_t *t01 = tmp;
   uint64_t m[7U] = { 0U };
   uint64_t p[5U] = { 0U };
-  KRML_HOST_IGNORE(mul_pow2_256_minus_q_add((uint32_t)4U,
-      (uint32_t)7U,
-      t01,
-      a + (uint32_t)4U,
-      a,
-      m));
-  KRML_HOST_IGNORE(mul_pow2_256_minus_q_add((uint32_t)3U,
-      (uint32_t)5U,
-      t01,
-      m + (uint32_t)4U,
-      m,
-      p));
-  uint64_t
-  c2 = mul_pow2_256_minus_q_add((uint32_t)1U, (uint32_t)4U, t01, p + (uint32_t)4U, p, r);
+  mul_pow2_256_minus_q_add(4U, 7U, t01, a + 4U, a, m);
+  mul_pow2_256_minus_q_add(3U, 5U, t01, m + 4U, m, p);
+  uint64_t c2 = mul_pow2_256_minus_q_add(1U, 4U, t01, p + 4U, p, r);
   uint64_t c0 = c2;
   uint64_t c1 = add4(r, tmp, out);
-  uint64_t mask = (uint64_t)0U - (c0 + c1);
+  uint64_t mask = 0ULL - (c0 + c1);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = out;
     uint64_t x = (mask & out[i]) | (~mask & r[i]);
     os[i] = x;);
@@ -557,10 +542,10 @@ static inline void qsqr(uint64_t *out, uint64_t *f)
 static inline void qnegate_conditional_vartime(uint64_t *f, bool is_negate)
 {
   uint64_t n[4U] = { 0U };
-  n[0U] = (uint64_t)0xbfd25e8cd0364141U;
-  n[1U] = (uint64_t)0xbaaedce6af48a03bU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0xffffffffffffffffU;
+  n[0U] = 0xbfd25e8cd0364141ULL;
+  n[1U] = 0xbaaedce6af48a03bULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0xffffffffffffffffULL;
   uint64_t zero[4U] = { 0U };
   if (is_negate)
   {
@@ -574,31 +559,31 @@ static inline bool is_qelem_le_q_halved_vartime(uint64_t *f)
   uint64_t a1 = f[1U];
   uint64_t a2 = f[2U];
   uint64_t a3 = f[3U];
-  if (a3 < (uint64_t)0x7fffffffffffffffU)
+  if (a3 < 0x7fffffffffffffffULL)
   {
     return true;
   }
-  if (a3 > (uint64_t)0x7fffffffffffffffU)
+  if (a3 > 0x7fffffffffffffffULL)
   {
     return false;
   }
-  if (a2 < (uint64_t)0xffffffffffffffffU)
+  if (a2 < 0xffffffffffffffffULL)
   {
     return true;
   }
-  if (a2 > (uint64_t)0xffffffffffffffffU)
+  if (a2 > 0xffffffffffffffffULL)
   {
     return false;
   }
-  if (a1 < (uint64_t)0x5d576e7357a4501dU)
+  if (a1 < 0x5d576e7357a4501dULL)
   {
     return true;
   }
-  if (a1 > (uint64_t)0x5d576e7357a4501dU)
+  if (a1 > 0x5d576e7357a4501dULL)
   {
     return false;
   }
-  return a0 <= (uint64_t)0xdfe92f46681b20a0U;
+  return a0 <= 0xdfe92f46681b20a0ULL;
 }
 
 static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b)
@@ -606,27 +591,26 @@ static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b)
   uint64_t l[8U] = { 0U };
   mul4(a, b, l);
   uint64_t res_b_padded[4U] = { 0U };
-  memcpy(res_b_padded, l + (uint32_t)6U, (uint32_t)2U * sizeof (uint64_t));
-  uint64_t
-  c0 = Lib_IntTypes_Intrinsics_add_carry_u64((uint64_t)0U, res_b_padded[0U], (uint64_t)1U, res);
-  uint64_t *a1 = res_b_padded + (uint32_t)1U;
-  uint64_t *res1 = res + (uint32_t)1U;
+  memcpy(res_b_padded, l + 6U, 2U * sizeof (uint64_t));
+  uint64_t c0 = Lib_IntTypes_Intrinsics_add_carry_u64(0ULL, res_b_padded[0U], 1ULL, res);
+  uint64_t *a1 = res_b_padded + 1U;
+  uint64_t *res1 = res + 1U;
   uint64_t c = c0;
   KRML_MAYBE_FOR3(i,
-    (uint32_t)0U,
-    (uint32_t)3U,
-    (uint32_t)1U,
+    0U,
+    3U,
+    1U,
     uint64_t t1 = a1[i];
     uint64_t *res_i = res1 + i;
-    c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i););
+    c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, 0ULL, res_i););
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t flag = l[5U] >> (uint32_t)63U;
-  uint64_t mask = (uint64_t)0U - flag;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t flag = l[5U] >> 63U;
+  uint64_t mask = 0ULL - flag;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x = (mask & res[i]) | (~mask & res_b_padded[i]);
     os[i] = x;);
@@ -634,7 +618,7 @@ static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b)
 
 static inline void qsquare_times_in_place(uint64_t *out, uint32_t b)
 {
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  for (uint32_t i = 0U; i < b; i++)
   {
     qsqr(out, out);
   }
@@ -642,8 +626,8 @@ static inline void qsquare_times_in_place(uint64_t *out, uint32_t b)
 
 static inline void qsquare_times(uint64_t *out, uint64_t *a, uint32_t b)
 {
-  memcpy(out, a, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < b; i++)
+  memcpy(out, a, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < b; i++)
   {
     qsqr(out, out);
   }
@@ -658,7 +642,7 @@ static inline void qinv(uint64_t *out, uint64_t *f)
   uint64_t x_1001[4U] = { 0U };
   uint64_t x_1011[4U] = { 0U };
   uint64_t x_1101[4U] = { 0U };
-  qsquare_times(x_10, f, (uint32_t)1U);
+  qsquare_times(x_10, f, 1U);
   qmul(x_11, x_10, f);
   qmul(x_101, x_10, x_11);
   qmul(x_111, x_10, x_101);
@@ -668,89 +652,89 @@ static inline void qinv(uint64_t *out, uint64_t *f)
   uint64_t x6[4U] = { 0U };
   uint64_t x8[4U] = { 0U };
   uint64_t x14[4U] = { 0U };
-  qsquare_times(x6, x_1101, (uint32_t)2U);
+  qsquare_times(x6, x_1101, 2U);
   qmul(x6, x6, x_1011);
-  qsquare_times(x8, x6, (uint32_t)2U);
+  qsquare_times(x8, x6, 2U);
   qmul(x8, x8, x_11);
-  qsquare_times(x14, x8, (uint32_t)6U);
+  qsquare_times(x14, x8, 6U);
   qmul(x14, x14, x6);
   uint64_t x56[4U] = { 0U };
-  qsquare_times(out, x14, (uint32_t)14U);
+  qsquare_times(out, x14, 14U);
   qmul(out, out, x14);
-  qsquare_times(x56, out, (uint32_t)28U);
+  qsquare_times(x56, out, 28U);
   qmul(x56, x56, out);
-  qsquare_times(out, x56, (uint32_t)56U);
+  qsquare_times(out, x56, 56U);
   qmul(out, out, x56);
-  qsquare_times_in_place(out, (uint32_t)14U);
+  qsquare_times_in_place(out, 14U);
   qmul(out, out, x14);
-  qsquare_times_in_place(out, (uint32_t)3U);
+  qsquare_times_in_place(out, 3U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_1011);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_1011);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)3U);
+  qsquare_times_in_place(out, 3U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_1001);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_101);
-  qsquare_times_in_place(out, (uint32_t)10U);
+  qsquare_times_in_place(out, 10U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_111);
-  qsquare_times_in_place(out, (uint32_t)9U);
+  qsquare_times_in_place(out, 9U);
   qmul(out, out, x8);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_1001);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_1011);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)5U);
+  qsquare_times_in_place(out, 5U);
   qmul(out, out, x_11);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)10U);
+  qsquare_times_in_place(out, 10U);
   qmul(out, out, x_1101);
-  qsquare_times_in_place(out, (uint32_t)4U);
+  qsquare_times_in_place(out, 4U);
   qmul(out, out, x_1001);
-  qsquare_times_in_place(out, (uint32_t)6U);
+  qsquare_times_in_place(out, 6U);
   qmul(out, out, f);
-  qsquare_times_in_place(out, (uint32_t)8U);
+  qsquare_times_in_place(out, 8U);
   qmul(out, out, x6);
 }
 
 void Hacl_Impl_K256_Point_make_point_at_inf(uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
-  memset(px, 0U, (uint32_t)5U * sizeof (uint64_t));
-  memset(py, 0U, (uint32_t)5U * sizeof (uint64_t));
-  py[0U] = (uint64_t)1U;
-  memset(pz, 0U, (uint32_t)5U * sizeof (uint64_t));
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
+  memset(px, 0U, 5U * sizeof (uint64_t));
+  memset(py, 0U, 5U * sizeof (uint64_t));
+  py[0U] = 1ULL;
+  memset(pz, 0U, 5U * sizeof (uint64_t));
 }
 
 static inline void to_aff_point(uint64_t *p_aff, uint64_t *p)
 {
   uint64_t *x = p_aff;
-  uint64_t *y = p_aff + (uint32_t)5U;
+  uint64_t *y = p_aff + 5U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y1 = p + 5U;
+  uint64_t *z1 = p + 10U;
   uint64_t zinv[5U] = { 0U };
   Hacl_Impl_K256_Finv_finv(zinv, z1);
   Hacl_K256_Field_fmul(x, x1, zinv);
@@ -762,7 +746,7 @@ static inline void to_aff_point(uint64_t *p_aff, uint64_t *p)
 static inline void to_aff_point_x(uint64_t *x, uint64_t *p)
 {
   uint64_t *x1 = p;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *z1 = p + 10U;
   uint64_t zinv[5U] = { 0U };
   Hacl_Impl_K256_Finv_finv(zinv, z1);
   Hacl_K256_Field_fmul(x, x1, zinv);
@@ -773,13 +757,13 @@ static inline bool is_on_curve_vartime(uint64_t *p)
 {
   uint64_t y2_exp[5U] = { 0U };
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)5U;
+  uint64_t *y = p + 5U;
   uint64_t b[5U] = { 0U };
-  b[0U] = (uint64_t)0x7U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 0x7ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
   Hacl_K256_Field_fsqr(y2_exp, x);
   Hacl_K256_Field_fmul(y2_exp, y2_exp, x);
   Hacl_K256_Field_fadd(y2_exp, y2_exp, b);
@@ -795,11 +779,11 @@ static inline bool is_on_curve_vartime(uint64_t *p)
 void Hacl_Impl_K256_Point_point_negate(uint64_t *out, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
   uint64_t *ox = out;
-  uint64_t *oy = out + (uint32_t)5U;
-  uint64_t *oz = out + (uint32_t)10U;
+  uint64_t *oy = out + 5U;
+  uint64_t *oz = out + 10U;
   ox[0U] = px[0U];
   ox[1U] = px[1U];
   ox[2U] = px[2U];
@@ -815,11 +799,11 @@ void Hacl_Impl_K256_Point_point_negate(uint64_t *out, uint64_t *p)
   uint64_t a2 = py[2U];
   uint64_t a3 = py[3U];
   uint64_t a4 = py[4U];
-  uint64_t r0 = (uint64_t)18014381329608892U - a0;
-  uint64_t r1 = (uint64_t)18014398509481980U - a1;
-  uint64_t r2 = (uint64_t)18014398509481980U - a2;
-  uint64_t r3 = (uint64_t)18014398509481980U - a3;
-  uint64_t r4 = (uint64_t)1125899906842620U - a4;
+  uint64_t r0 = 18014381329608892ULL - a0;
+  uint64_t r1 = 18014398509481980ULL - a1;
+  uint64_t r2 = 18014398509481980ULL - a2;
+  uint64_t r3 = 18014398509481980ULL - a3;
+  uint64_t r4 = 1125899906842620ULL - a4;
   uint64_t f0 = r0;
   uint64_t f1 = r1;
   uint64_t f2 = r2;
@@ -845,9 +829,9 @@ static inline void point_negate_conditional_vartime(uint64_t *p, bool is_negate)
 static inline void aff_point_store(uint8_t *out, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
+  uint64_t *py = p + 5U;
   Hacl_K256_Field_store_felem(out, px);
-  Hacl_K256_Field_store_felem(out + (uint32_t)32U, py);
+  Hacl_K256_Field_store_felem(out + 32U, py);
 }
 
 void Hacl_Impl_K256_Point_point_store(uint8_t *out, uint64_t *p)
@@ -860,9 +844,9 @@ void Hacl_Impl_K256_Point_point_store(uint8_t *out, uint64_t *p)
 bool Hacl_Impl_K256_Point_aff_point_load_vartime(uint64_t *p, uint8_t *b)
 {
   uint8_t *px = b;
-  uint8_t *py = b + (uint32_t)32U;
+  uint8_t *py = b + 32U;
   uint64_t *bn_px = p;
-  uint64_t *bn_py = p + (uint32_t)5U;
+  uint64_t *bn_py = p + 5U;
   bool is_x_valid = Hacl_K256_Field_load_felem_lt_prime_vartime(bn_px, px);
   bool is_y_valid = Hacl_K256_Field_load_felem_lt_prime_vartime(bn_py, py);
   if (is_x_valid && is_y_valid)
@@ -879,14 +863,14 @@ static inline bool load_point_vartime(uint64_t *p, uint8_t *b)
   if (res)
   {
     uint64_t *x = p_aff;
-    uint64_t *y = p_aff + (uint32_t)5U;
+    uint64_t *y = p_aff + 5U;
     uint64_t *x1 = p;
-    uint64_t *y1 = p + (uint32_t)5U;
-    uint64_t *z1 = p + (uint32_t)10U;
-    memcpy(x1, x, (uint32_t)5U * sizeof (uint64_t));
-    memcpy(y1, y, (uint32_t)5U * sizeof (uint64_t));
-    memset(z1, 0U, (uint32_t)5U * sizeof (uint64_t));
-    z1[0U] = (uint64_t)1U;
+    uint64_t *y1 = p + 5U;
+    uint64_t *z1 = p + 10U;
+    memcpy(x1, x, 5U * sizeof (uint64_t));
+    memcpy(y1, y, 5U * sizeof (uint64_t));
+    memset(z1, 0U, 5U * sizeof (uint64_t));
+    z1[0U] = 1ULL;
   }
   return res;
 }
@@ -895,24 +879,24 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
 {
   uint8_t s0 = s[0U];
   uint8_t s01 = s0;
-  if (!(s01 == (uint8_t)0x02U || s01 == (uint8_t)0x03U))
+  if (!(s01 == 0x02U || s01 == 0x03U))
   {
     return false;
   }
-  uint8_t *xb = s + (uint32_t)1U;
+  uint8_t *xb = s + 1U;
   bool is_x_valid = Hacl_K256_Field_load_felem_lt_prime_vartime(x, xb);
-  bool is_y_odd = s01 == (uint8_t)0x03U;
+  bool is_y_odd = s01 == 0x03U;
   if (!is_x_valid)
   {
     return false;
   }
   uint64_t y2[5U] = { 0U };
   uint64_t b[5U] = { 0U };
-  b[0U] = (uint64_t)0x7U;
-  b[1U] = (uint64_t)0U;
-  b[2U] = (uint64_t)0U;
-  b[3U] = (uint64_t)0U;
-  b[4U] = (uint64_t)0U;
+  b[0U] = 0x7ULL;
+  b[1U] = 0ULL;
+  b[2U] = 0ULL;
+  b[3U] = 0ULL;
+  b[4U] = 0ULL;
   Hacl_K256_Field_fsqr(y2, x);
   Hacl_K256_Field_fmul(y2, y2, x);
   Hacl_K256_Field_fadd(y2, y2, b);
@@ -930,7 +914,7 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
     return false;
   }
   uint64_t x0 = y[0U];
-  bool is_y_odd1 = (x0 & (uint64_t)1U) == (uint64_t)1U;
+  bool is_y_odd1 = (x0 & 1ULL) == 1ULL;
   Hacl_K256_Field_fnegate_conditional_vartime(y, is_y_odd1 != is_y_odd);
   return true;
 }
@@ -939,33 +923,33 @@ void Hacl_Impl_K256_PointDouble_point_double(uint64_t *out, uint64_t *p)
 {
   uint64_t tmp[25U] = { 0U };
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y1 = p + 5U;
+  uint64_t *z1 = p + 10U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
   uint64_t *yy = tmp;
-  uint64_t *zz = tmp + (uint32_t)5U;
-  uint64_t *bzz3 = tmp + (uint32_t)10U;
-  uint64_t *bzz9 = tmp + (uint32_t)15U;
-  uint64_t *tmp1 = tmp + (uint32_t)20U;
+  uint64_t *zz = tmp + 5U;
+  uint64_t *bzz3 = tmp + 10U;
+  uint64_t *bzz9 = tmp + 15U;
+  uint64_t *tmp1 = tmp + 20U;
   Hacl_K256_Field_fsqr(yy, y1);
   Hacl_K256_Field_fsqr(zz, z1);
-  Hacl_K256_Field_fmul_small_num(x3, x1, (uint64_t)2U);
+  Hacl_K256_Field_fmul_small_num(x3, x1, 2ULL);
   Hacl_K256_Field_fmul(x3, x3, y1);
   Hacl_K256_Field_fmul(tmp1, yy, y1);
   Hacl_K256_Field_fmul(z3, tmp1, z1);
-  Hacl_K256_Field_fmul_small_num(z3, z3, (uint64_t)8U);
+  Hacl_K256_Field_fmul_small_num(z3, z3, 8ULL);
   Hacl_K256_Field_fnormalize_weak(z3, z3);
-  Hacl_K256_Field_fmul_small_num(bzz3, zz, (uint64_t)21U);
+  Hacl_K256_Field_fmul_small_num(bzz3, zz, 21ULL);
   Hacl_K256_Field_fnormalize_weak(bzz3, bzz3);
-  Hacl_K256_Field_fmul_small_num(bzz9, bzz3, (uint64_t)3U);
-  Hacl_K256_Field_fsub(bzz9, yy, bzz9, (uint64_t)6U);
+  Hacl_K256_Field_fmul_small_num(bzz9, bzz3, 3ULL);
+  Hacl_K256_Field_fsub(bzz9, yy, bzz9, 6ULL);
   Hacl_K256_Field_fadd(tmp1, yy, bzz3);
   Hacl_K256_Field_fmul(tmp1, bzz9, tmp1);
   Hacl_K256_Field_fmul(y3, yy, zz);
   Hacl_K256_Field_fmul(x3, x3, bzz9);
-  Hacl_K256_Field_fmul_small_num(y3, y3, (uint64_t)168U);
+  Hacl_K256_Field_fmul_small_num(y3, y3, 168ULL);
   Hacl_K256_Field_fadd(y3, tmp1, y3);
   Hacl_K256_Field_fnormalize_weak(y3, y3);
 }
@@ -974,23 +958,23 @@ void Hacl_Impl_K256_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q)
 {
   uint64_t tmp[45U] = { 0U };
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)5U;
-  uint64_t *z1 = p + (uint32_t)10U;
+  uint64_t *y1 = p + 5U;
+  uint64_t *z1 = p + 10U;
   uint64_t *x2 = q;
-  uint64_t *y2 = q + (uint32_t)5U;
-  uint64_t *z2 = q + (uint32_t)10U;
+  uint64_t *y2 = q + 5U;
+  uint64_t *z2 = q + 10U;
   uint64_t *x3 = out;
-  uint64_t *y3 = out + (uint32_t)5U;
-  uint64_t *z3 = out + (uint32_t)10U;
+  uint64_t *y3 = out + 5U;
+  uint64_t *z3 = out + 10U;
   uint64_t *xx = tmp;
-  uint64_t *yy = tmp + (uint32_t)5U;
-  uint64_t *zz = tmp + (uint32_t)10U;
-  uint64_t *xy_pairs = tmp + (uint32_t)15U;
-  uint64_t *yz_pairs = tmp + (uint32_t)20U;
-  uint64_t *xz_pairs = tmp + (uint32_t)25U;
-  uint64_t *yy_m_bzz3 = tmp + (uint32_t)30U;
-  uint64_t *yy_p_bzz3 = tmp + (uint32_t)35U;
-  uint64_t *tmp1 = tmp + (uint32_t)40U;
+  uint64_t *yy = tmp + 5U;
+  uint64_t *zz = tmp + 10U;
+  uint64_t *xy_pairs = tmp + 15U;
+  uint64_t *yz_pairs = tmp + 20U;
+  uint64_t *xz_pairs = tmp + 25U;
+  uint64_t *yy_m_bzz3 = tmp + 30U;
+  uint64_t *yy_p_bzz3 = tmp + 35U;
+  uint64_t *tmp1 = tmp + 40U;
   Hacl_K256_Field_fmul(xx, x1, x2);
   Hacl_K256_Field_fmul(yy, y1, y2);
   Hacl_K256_Field_fmul(zz, z1, z2);
@@ -998,29 +982,29 @@ void Hacl_Impl_K256_PointAdd_point_add(uint64_t *out, uint64_t *p, uint64_t *q)
   Hacl_K256_Field_fadd(tmp1, x2, y2);
   Hacl_K256_Field_fmul(xy_pairs, xy_pairs, tmp1);
   Hacl_K256_Field_fadd(tmp1, xx, yy);
-  Hacl_K256_Field_fsub(xy_pairs, xy_pairs, tmp1, (uint64_t)4U);
+  Hacl_K256_Field_fsub(xy_pairs, xy_pairs, tmp1, 4ULL);
   Hacl_K256_Field_fadd(yz_pairs, y1, z1);
   Hacl_K256_Field_fadd(tmp1, y2, z2);
   Hacl_K256_Field_fmul(yz_pairs, yz_pairs, tmp1);
   Hacl_K256_Field_fadd(tmp1, yy, zz);
-  Hacl_K256_Field_fsub(yz_pairs, yz_pairs, tmp1, (uint64_t)4U);
+  Hacl_K256_Field_fsub(yz_pairs, yz_pairs, tmp1, 4ULL);
   Hacl_K256_Field_fadd(xz_pairs, x1, z1);
   Hacl_K256_Field_fadd(tmp1, x2, z2);
   Hacl_K256_Field_fmul(xz_pairs, xz_pairs, tmp1);
   Hacl_K256_Field_fadd(tmp1, xx, zz);
-  Hacl_K256_Field_fsub(xz_pairs, xz_pairs, tmp1, (uint64_t)4U);
-  Hacl_K256_Field_fmul_small_num(tmp1, zz, (uint64_t)21U);
+  Hacl_K256_Field_fsub(xz_pairs, xz_pairs, tmp1, 4ULL);
+  Hacl_K256_Field_fmul_small_num(tmp1, zz, 21ULL);
   Hacl_K256_Field_fnormalize_weak(tmp1, tmp1);
-  Hacl_K256_Field_fsub(yy_m_bzz3, yy, tmp1, (uint64_t)2U);
+  Hacl_K256_Field_fsub(yy_m_bzz3, yy, tmp1, 2ULL);
   Hacl_K256_Field_fadd(yy_p_bzz3, yy, tmp1);
-  Hacl_K256_Field_fmul_small_num(x3, yz_pairs, (uint64_t)21U);
+  Hacl_K256_Field_fmul_small_num(x3, yz_pairs, 21ULL);
   Hacl_K256_Field_fnormalize_weak(x3, x3);
-  Hacl_K256_Field_fmul_small_num(z3, xx, (uint64_t)3U);
-  Hacl_K256_Field_fmul_small_num(y3, z3, (uint64_t)21U);
+  Hacl_K256_Field_fmul_small_num(z3, xx, 3ULL);
+  Hacl_K256_Field_fmul_small_num(y3, z3, 21ULL);
   Hacl_K256_Field_fnormalize_weak(y3, y3);
   Hacl_K256_Field_fmul(tmp1, xy_pairs, yy_m_bzz3);
   Hacl_K256_Field_fmul(x3, x3, xz_pairs);
-  Hacl_K256_Field_fsub(x3, tmp1, x3, (uint64_t)2U);
+  Hacl_K256_Field_fsub(x3, tmp1, x3, 2ULL);
   Hacl_K256_Field_fnormalize_weak(x3, x3);
   Hacl_K256_Field_fmul(tmp1, yy_p_bzz3, yy_m_bzz3);
   Hacl_K256_Field_fmul(y3, y3, xz_pairs);
@@ -1036,30 +1020,30 @@ static inline void scalar_split_lambda(uint64_t *r1, uint64_t *r2, uint64_t *k)
 {
   uint64_t tmp1[4U] = { 0U };
   uint64_t tmp2[4U] = { 0U };
-  tmp1[0U] = (uint64_t)0xe893209a45dbb031U;
-  tmp1[1U] = (uint64_t)0x3daa8a1471e8ca7fU;
-  tmp1[2U] = (uint64_t)0xe86c90e49284eb15U;
-  tmp1[3U] = (uint64_t)0x3086d221a7d46bcdU;
-  tmp2[0U] = (uint64_t)0x1571b4ae8ac47f71U;
-  tmp2[1U] = (uint64_t)0x221208ac9df506c6U;
-  tmp2[2U] = (uint64_t)0x6f547fa90abfe4c4U;
-  tmp2[3U] = (uint64_t)0xe4437ed6010e8828U;
+  tmp1[0U] = 0xe893209a45dbb031ULL;
+  tmp1[1U] = 0x3daa8a1471e8ca7fULL;
+  tmp1[2U] = 0xe86c90e49284eb15ULL;
+  tmp1[3U] = 0x3086d221a7d46bcdULL;
+  tmp2[0U] = 0x1571b4ae8ac47f71ULL;
+  tmp2[1U] = 0x221208ac9df506c6ULL;
+  tmp2[2U] = 0x6f547fa90abfe4c4ULL;
+  tmp2[3U] = 0xe4437ed6010e8828ULL;
   qmul_shift_384(r1, k, tmp1);
   qmul_shift_384(r2, k, tmp2);
-  tmp1[0U] = (uint64_t)0x6f547fa90abfe4c3U;
-  tmp1[1U] = (uint64_t)0xe4437ed6010e8828U;
-  tmp1[2U] = (uint64_t)0x0U;
-  tmp1[3U] = (uint64_t)0x0U;
-  tmp2[0U] = (uint64_t)0xd765cda83db1562cU;
-  tmp2[1U] = (uint64_t)0x8a280ac50774346dU;
-  tmp2[2U] = (uint64_t)0xfffffffffffffffeU;
-  tmp2[3U] = (uint64_t)0xffffffffffffffffU;
+  tmp1[0U] = 0x6f547fa90abfe4c3ULL;
+  tmp1[1U] = 0xe4437ed6010e8828ULL;
+  tmp1[2U] = 0x0ULL;
+  tmp1[3U] = 0x0ULL;
+  tmp2[0U] = 0xd765cda83db1562cULL;
+  tmp2[1U] = 0x8a280ac50774346dULL;
+  tmp2[2U] = 0xfffffffffffffffeULL;
+  tmp2[3U] = 0xffffffffffffffffULL;
   qmul(r1, r1, tmp1);
   qmul(r2, r2, tmp2);
-  tmp1[0U] = (uint64_t)0xe0cfc810b51283cfU;
-  tmp1[1U] = (uint64_t)0xa880b9fc8ec739c2U;
-  tmp1[2U] = (uint64_t)0x5ad9e3fd77ed9ba4U;
-  tmp1[3U] = (uint64_t)0xac9c52b33fa3cf1fU;
+  tmp1[0U] = 0xe0cfc810b51283cfULL;
+  tmp1[1U] = 0xa880b9fc8ec739c2ULL;
+  tmp1[2U] = 0x5ad9e3fd77ed9ba4ULL;
+  tmp1[3U] = 0xac9c52b33fa3cf1fULL;
   qadd(r2, r1, r2);
   qmul(tmp2, r2, tmp1);
   qadd(r1, k, tmp2);
@@ -1068,17 +1052,17 @@ static inline void scalar_split_lambda(uint64_t *r1, uint64_t *r2, uint64_t *k)
 static inline void point_mul_lambda(uint64_t *res, uint64_t *p)
 {
   uint64_t *rx = res;
-  uint64_t *ry = res + (uint32_t)5U;
-  uint64_t *rz = res + (uint32_t)10U;
+  uint64_t *ry = res + 5U;
+  uint64_t *rz = res + 10U;
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)5U;
-  uint64_t *pz = p + (uint32_t)10U;
+  uint64_t *py = p + 5U;
+  uint64_t *pz = p + 10U;
   uint64_t beta[5U] = { 0U };
-  beta[0U] = (uint64_t)0x96c28719501eeU;
-  beta[1U] = (uint64_t)0x7512f58995c13U;
-  beta[2U] = (uint64_t)0xc3434e99cf049U;
-  beta[3U] = (uint64_t)0x7106e64479eaU;
-  beta[4U] = (uint64_t)0x7ae96a2b657cU;
+  beta[0U] = 0x96c28719501eeULL;
+  beta[1U] = 0x7512f58995c13ULL;
+  beta[2U] = 0xc3434e99cf049ULL;
+  beta[3U] = 0x7106e64479eaULL;
+  beta[4U] = 0x7ae96a2b657cULL;
   Hacl_K256_Field_fmul(rx, beta, px);
   ry[0U] = py[0U];
   ry[1U] = py[1U];
@@ -1096,11 +1080,11 @@ static inline void point_mul_lambda_inplace(uint64_t *res)
 {
   uint64_t *rx = res;
   uint64_t beta[5U] = { 0U };
-  beta[0U] = (uint64_t)0x96c28719501eeU;
-  beta[1U] = (uint64_t)0x7512f58995c13U;
-  beta[2U] = (uint64_t)0xc3434e99cf049U;
-  beta[3U] = (uint64_t)0x7106e64479eaU;
-  beta[4U] = (uint64_t)0x7ae96a2b657cU;
+  beta[0U] = 0x96c28719501eeULL;
+  beta[1U] = 0x7512f58995c13ULL;
+  beta[2U] = 0xc3434e99cf049ULL;
+  beta[3U] = 0x7106e64479eaULL;
+  beta[4U] = 0x7ae96a2b657cULL;
   Hacl_K256_Field_fmul(rx, beta, rx);
 }
 
@@ -1123,7 +1107,7 @@ ecmult_endo_split(
 {
   scalar_split_lambda(r1, r2, scalar);
   point_mul_lambda(q2, q);
-  memcpy(q1, q, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(q1, q, 15U * sizeof (uint64_t));
   bool b0 = is_qelem_le_q_halved_vartime(r1);
   qnegate_conditional_vartime(r1, !b0);
   point_negate_conditional_vartime(q1, !b0);
@@ -1140,45 +1124,37 @@ void Hacl_Impl_K256_PointMul_point_mul(uint64_t *out, uint64_t *scalar, uint64_t
   uint64_t table[240U] = { 0U };
   uint64_t tmp[15U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)15U;
+  uint64_t *t1 = table + 15U;
   Hacl_Impl_K256_Point_make_point_at_inf(t0);
-  memcpy(t1, q, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(t1, q, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 15U;
     Hacl_Impl_K256_PointDouble_point_double(tmp, t11);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U;
+    memcpy(table + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 15U;
     Hacl_Impl_K256_PointAdd_point_add(tmp, q, t2);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t)););
+    memcpy(table + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t)););
   Hacl_Impl_K256_Point_make_point_at_inf(out);
   uint64_t tmp0[15U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
-    KRML_MAYBE_FOR4(i,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)15U * sizeof (uint64_t));
+    KRML_MAYBE_FOR4(i, 0U, 4U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 256U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 15U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)15U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 15U;
       KRML_MAYBE_FOR15(i,
-        (uint32_t)0U,
-        (uint32_t)15U,
-        (uint32_t)1U,
+        0U,
+        15U,
+        1U,
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
@@ -1188,17 +1164,17 @@ void Hacl_Impl_K256_PointMul_point_mul(uint64_t *out, uint64_t *scalar, uint64_t
 
 static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)
 {
-  memcpy(tmp, (uint64_t *)table, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(tmp, (uint64_t *)table, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i0,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));
-    const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    15U,
+    1U,
+    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + 1U));
+    const uint64_t *res_j = table + (i0 + 1U) * 15U;
     KRML_MAYBE_FOR15(i,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
+      0U,
+      15U,
+      1U,
       uint64_t *os = tmp;
       uint64_t x = (c & res_j[i]) | (~c & tmp[i]);
       os[i] = x;););
@@ -1208,79 +1184,72 @@ static inline void point_mul_g(uint64_t *out, uint64_t *scalar)
 {
   uint64_t q1[15U] = { 0U };
   uint64_t *gx = q1;
-  uint64_t *gy = q1 + (uint32_t)5U;
-  uint64_t *gz = q1 + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = q1 + 5U;
+  uint64_t *gz = q1 + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
   uint64_t
   q2[15U] =
     {
-      (uint64_t)4496295042185355U, (uint64_t)3125448202219451U, (uint64_t)1239608518490046U,
-      (uint64_t)2687445637493112U, (uint64_t)77979604880139U, (uint64_t)3360310474215011U,
-      (uint64_t)1216410458165163U, (uint64_t)177901593587973U, (uint64_t)3209978938104985U,
-      (uint64_t)118285133003718U, (uint64_t)434519962075150U, (uint64_t)1114612377498854U,
-      (uint64_t)3488596944003813U, (uint64_t)450716531072892U, (uint64_t)66044973203836U
+      4496295042185355ULL, 3125448202219451ULL, 1239608518490046ULL, 2687445637493112ULL,
+      77979604880139ULL, 3360310474215011ULL, 1216410458165163ULL, 177901593587973ULL,
+      3209978938104985ULL, 118285133003718ULL, 434519962075150ULL, 1114612377498854ULL,
+      3488596944003813ULL, 450716531072892ULL, 66044973203836ULL
     };
-  KRML_HOST_IGNORE(q2);
+  KRML_MAYBE_UNUSED_VAR(q2);
   uint64_t
   q3[15U] =
     {
-      (uint64_t)1277614565900951U, (uint64_t)378671684419493U, (uint64_t)3176260448102880U,
-      (uint64_t)1575691435565077U, (uint64_t)167304528382180U, (uint64_t)2600787765776588U,
-      (uint64_t)7497946149293U, (uint64_t)2184272641272202U, (uint64_t)2200235265236628U,
-      (uint64_t)265969268774814U, (uint64_t)1913228635640715U, (uint64_t)2831959046949342U,
-      (uint64_t)888030405442963U, (uint64_t)1817092932985033U, (uint64_t)101515844997121U
+      1277614565900951ULL, 378671684419493ULL, 3176260448102880ULL, 1575691435565077ULL,
+      167304528382180ULL, 2600787765776588ULL, 7497946149293ULL, 2184272641272202ULL,
+      2200235265236628ULL, 265969268774814ULL, 1913228635640715ULL, 2831959046949342ULL,
+      888030405442963ULL, 1817092932985033ULL, 101515844997121ULL
     };
-  KRML_HOST_IGNORE(q3);
+  KRML_MAYBE_UNUSED_VAR(q3);
   uint64_t
   q4[15U] =
     {
-      (uint64_t)34056422761564U, (uint64_t)3315864838337811U, (uint64_t)3797032336888745U,
-      (uint64_t)2580641850480806U, (uint64_t)208048944042500U, (uint64_t)1233795288689421U,
-      (uint64_t)1048795233382631U, (uint64_t)646545158071530U, (uint64_t)1816025742137285U,
-      (uint64_t)12245672982162U, (uint64_t)2119364213800870U, (uint64_t)2034960311715107U,
-      (uint64_t)3172697815804487U, (uint64_t)4185144850224160U, (uint64_t)2792055915674U
+      34056422761564ULL, 3315864838337811ULL, 3797032336888745ULL, 2580641850480806ULL,
+      208048944042500ULL, 1233795288689421ULL, 1048795233382631ULL, 646545158071530ULL,
+      1816025742137285ULL, 12245672982162ULL, 2119364213800870ULL, 2034960311715107ULL,
+      3172697815804487ULL, 4185144850224160ULL, 2792055915674ULL
     };
-  KRML_HOST_IGNORE(q4);
+  KRML_MAYBE_UNUSED_VAR(q4);
   uint64_t *r1 = scalar;
-  uint64_t *r2 = scalar + (uint32_t)1U;
-  uint64_t *r3 = scalar + (uint32_t)2U;
-  uint64_t *r4 = scalar + (uint32_t)3U;
+  uint64_t *r2 = scalar + 1U;
+  uint64_t *r3 = scalar + 2U;
+  uint64_t *r4 = scalar + 3U;
   Hacl_Impl_K256_Point_make_point_at_inf(out);
   uint64_t tmp[15U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    KRML_MAYBE_FOR4(i0,
-      (uint32_t)0U,
-      (uint32_t)4U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);
+    0U,
+    16U,
+    1U,
+    KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 64U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp);
-    uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);
+    uint32_t k0 = 64U - 4U * i - 4U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp);
-    uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);
+    uint32_t k1 = 64U - 4U * i - 4U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp);
-    uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);
+    uint32_t k2 = 64U - 4U * i - 4U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U);
     precomp_get_consttime(Hacl_K256_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp););
 }
@@ -1290,75 +1259,65 @@ point_mul_g_double_vartime(uint64_t *out, uint64_t *scalar1, uint64_t *scalar2,
 {
   uint64_t q1[15U] = { 0U };
   uint64_t *gx = q1;
-  uint64_t *gy = q1 + (uint32_t)5U;
-  uint64_t *gz = q1 + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = q1 + 5U;
+  uint64_t *gz = q1 + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
   uint64_t table2[480U] = { 0U };
   uint64_t tmp[15U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)15U;
+  uint64_t *t1 = table2 + 15U;
   Hacl_Impl_K256_Point_make_point_at_inf(t0);
-  memcpy(t1, q2, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(t1, q2, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 15U;
     Hacl_Impl_K256_PointDouble_point_double(tmp, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U;
+    memcpy(table2 + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 15U;
     Hacl_Impl_K256_PointAdd_point_add(tmp, q2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t)););
   uint64_t tmp0[15U] = { 0U };
-  uint32_t i0 = (uint32_t)255U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, i0, (uint32_t)5U);
+  uint32_t i0 = 255U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
-  const
-  uint64_t
-  *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)15U;
-  memcpy(out, (uint64_t *)a_bits_l, (uint32_t)15U * sizeof (uint64_t));
-  uint32_t i1 = (uint32_t)255U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, i1, (uint32_t)5U);
+  const uint64_t *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 15U;
+  memcpy(out, (uint64_t *)a_bits_l, 15U * sizeof (uint64_t));
+  uint32_t i1 = 255U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
-  const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)15U;
-  memcpy(tmp0, (uint64_t *)a_bits_l0, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l0 = table2 + bits_l320 * 15U;
+  memcpy(tmp0, (uint64_t *)a_bits_l0, 15U * sizeof (uint64_t));
   Hacl_Impl_K256_PointAdd_point_add(out, out, tmp0);
   uint64_t tmp1[15U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++)
+  for (uint32_t i = 0U; i < 51U; i++)
   {
-    KRML_MAYBE_FOR5(i2,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 255U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, k, 5U);
     uint32_t bits_l321 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)15U;
-    memcpy(tmp1, (uint64_t *)a_bits_l1, (uint32_t)15U * sizeof (uint64_t));
+    const uint64_t *a_bits_l1 = table2 + bits_l321 * 15U;
+    memcpy(tmp1, (uint64_t *)a_bits_l1, 15U * sizeof (uint64_t));
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1);
-    uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, k0, (uint32_t)5U);
+    uint32_t k0 = 255U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, k0, 5U);
     uint32_t bits_l322 = (uint32_t)bits_l0;
     const
     uint64_t
-    *a_bits_l2 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)15U;
-    memcpy(tmp1, (uint64_t *)a_bits_l2, (uint32_t)15U * sizeof (uint64_t));
+    *a_bits_l2 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 15U;
+    memcpy(tmp1, (uint64_t *)a_bits_l2, 15U * sizeof (uint64_t));
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1);
   }
 }
@@ -1380,99 +1339,89 @@ point_mul_g_double_split_lambda_table(
   uint64_t table2[480U] = { 0U };
   uint64_t tmp[15U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)15U;
+  uint64_t *t1 = table2 + 15U;
   Hacl_Impl_K256_Point_make_point_at_inf(t0);
-  memcpy(t1, p2, (uint32_t)15U * sizeof (uint64_t));
+  memcpy(t1, p2, 15U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)15U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 15U;
     Hacl_Impl_K256_PointDouble_point_double(tmp, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)15U;
+    memcpy(table2 + (2U * i + 2U) * 15U, tmp, 15U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 15U;
     Hacl_Impl_K256_PointAdd_point_add(tmp, p2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)15U,
-      tmp,
-      (uint32_t)15U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 15U, tmp, 15U * sizeof (uint64_t)););
   uint64_t tmp0[15U] = { 0U };
   uint64_t tmp1[15U] = { 0U };
-  uint32_t i0 = (uint32_t)125U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r1, i0, (uint32_t)5U);
+  uint32_t i0 = 125U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
-  const
-  uint64_t
-  *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)15U;
-  memcpy(out, (uint64_t *)a_bits_l, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 15U;
+  memcpy(out, (uint64_t *)a_bits_l, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(out, is_negate1);
-  uint32_t i1 = (uint32_t)125U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r2, i1, (uint32_t)5U);
+  uint32_t i1 = 125U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
   const
   uint64_t
-  *a_bits_l0 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l320 * (uint32_t)15U;
-  memcpy(tmp1, (uint64_t *)a_bits_l0, (uint32_t)15U * sizeof (uint64_t));
+  *a_bits_l0 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l320 * 15U;
+  memcpy(tmp1, (uint64_t *)a_bits_l0, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(tmp1, is_negate2);
   point_mul_lambda_inplace(tmp1);
   Hacl_Impl_K256_PointAdd_point_add(out, out, tmp1);
   uint64_t tmp10[15U] = { 0U };
-  uint32_t i2 = (uint32_t)125U;
-  uint64_t bits_c1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r3, i2, (uint32_t)5U);
+  uint32_t i2 = 125U;
+  uint64_t bits_c1 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r3, i2, 5U);
   uint32_t bits_l321 = (uint32_t)bits_c1;
-  const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)15U;
-  memcpy(tmp0, (uint64_t *)a_bits_l1, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l1 = table2 + bits_l321 * 15U;
+  memcpy(tmp0, (uint64_t *)a_bits_l1, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(tmp0, is_negate3);
-  uint32_t i3 = (uint32_t)125U;
-  uint64_t bits_c2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r4, i3, (uint32_t)5U);
+  uint32_t i3 = 125U;
+  uint64_t bits_c2 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r4, i3, 5U);
   uint32_t bits_l322 = (uint32_t)bits_c2;
-  const uint64_t *a_bits_l2 = table2 + bits_l322 * (uint32_t)15U;
-  memcpy(tmp10, (uint64_t *)a_bits_l2, (uint32_t)15U * sizeof (uint64_t));
+  const uint64_t *a_bits_l2 = table2 + bits_l322 * 15U;
+  memcpy(tmp10, (uint64_t *)a_bits_l2, 15U * sizeof (uint64_t));
   point_negate_conditional_vartime(tmp10, is_negate4);
   point_mul_lambda_inplace(tmp10);
   Hacl_Impl_K256_PointAdd_point_add(tmp0, tmp0, tmp10);
   Hacl_Impl_K256_PointAdd_point_add(out, out, tmp0);
   uint64_t tmp2[15U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)25U; i++)
+  for (uint32_t i = 0U; i < 25U; i++)
   {
-    KRML_MAYBE_FOR5(i4,
-      (uint32_t)0U,
-      (uint32_t)5U,
-      (uint32_t)1U,
-      Hacl_Impl_K256_PointDouble_point_double(out, out););
-    uint32_t k = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r4, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i4, 0U, 5U, 1U, Hacl_Impl_K256_PointDouble_point_double(out, out););
+    uint32_t k = 125U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r4, k, 5U);
     uint32_t bits_l323 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l3 = table2 + bits_l323 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l3, (uint32_t)15U * sizeof (uint64_t));
+    const uint64_t *a_bits_l3 = table2 + bits_l323 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l3, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate4);
     point_mul_lambda_inplace(tmp2);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
-    uint32_t k0 = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r3, k0, (uint32_t)5U);
+    uint32_t k0 = 125U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r3, k0, 5U);
     uint32_t bits_l324 = (uint32_t)bits_l0;
-    const uint64_t *a_bits_l4 = table2 + bits_l324 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l4, (uint32_t)15U * sizeof (uint64_t));
+    const uint64_t *a_bits_l4 = table2 + bits_l324 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l4, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate3);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
-    uint32_t k1 = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r2, k1, (uint32_t)5U);
+    uint32_t k1 = 125U - 5U * i - 5U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r2, k1, 5U);
     uint32_t bits_l325 = (uint32_t)bits_l1;
     const
     uint64_t
-    *a_bits_l5 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l325 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l5, (uint32_t)15U * sizeof (uint64_t));
+    *a_bits_l5 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l325 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l5, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate2);
     point_mul_lambda_inplace(tmp2);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
-    uint32_t k2 = (uint32_t)125U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, r1, k2, (uint32_t)5U);
+    uint32_t k2 = 125U - 5U * i - 5U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, r1, k2, 5U);
     uint32_t bits_l326 = (uint32_t)bits_l2;
     const
     uint64_t
-    *a_bits_l6 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l326 * (uint32_t)15U;
-    memcpy(tmp2, (uint64_t *)a_bits_l6, (uint32_t)15U * sizeof (uint64_t));
+    *a_bits_l6 = Hacl_K256_PrecompTable_precomp_basepoint_table_w5 + bits_l326 * 15U;
+    memcpy(tmp2, (uint64_t *)a_bits_l6, 15U * sizeof (uint64_t));
     point_negate_conditional_vartime(tmp2, is_negate1);
     Hacl_Impl_K256_PointAdd_point_add(out, out, tmp2);
   }
@@ -1483,16 +1432,16 @@ check_ecmult_endo_split(uint64_t *r1, uint64_t *r2, uint64_t *r3, uint64_t *r4)
 {
   uint64_t f20 = r1[2U];
   uint64_t f30 = r1[3U];
-  bool b1 = f20 == (uint64_t)0U && f30 == (uint64_t)0U;
+  bool b1 = f20 == 0ULL && f30 == 0ULL;
   uint64_t f21 = r2[2U];
   uint64_t f31 = r2[3U];
-  bool b2 = f21 == (uint64_t)0U && f31 == (uint64_t)0U;
+  bool b2 = f21 == 0ULL && f31 == 0ULL;
   uint64_t f22 = r3[2U];
   uint64_t f32 = r3[3U];
-  bool b3 = f22 == (uint64_t)0U && f32 == (uint64_t)0U;
+  bool b3 = f22 == 0ULL && f32 == 0ULL;
   uint64_t f2 = r4[2U];
   uint64_t f3 = r4[3U];
-  bool b4 = f2 == (uint64_t)0U && f3 == (uint64_t)0U;
+  bool b4 = f2 == 0ULL && f3 == 0ULL;
   return b1 && b2 && b3 && b4;
 }
 
@@ -1515,30 +1464,30 @@ point_mul_g_double_split_lambda_vartime(
 {
   uint64_t g[15U] = { 0U };
   uint64_t *gx = g;
-  uint64_t *gy = g + (uint32_t)5U;
-  uint64_t *gz = g + (uint32_t)10U;
-  gx[0U] = (uint64_t)0x2815b16f81798U;
-  gx[1U] = (uint64_t)0xdb2dce28d959fU;
-  gx[2U] = (uint64_t)0xe870b07029bfcU;
-  gx[3U] = (uint64_t)0xbbac55a06295cU;
-  gx[4U] = (uint64_t)0x79be667ef9dcU;
-  gy[0U] = (uint64_t)0x7d08ffb10d4b8U;
-  gy[1U] = (uint64_t)0x48a68554199c4U;
-  gy[2U] = (uint64_t)0xe1108a8fd17b4U;
-  gy[3U] = (uint64_t)0xc4655da4fbfc0U;
-  gy[4U] = (uint64_t)0x483ada7726a3U;
-  memset(gz, 0U, (uint32_t)5U * sizeof (uint64_t));
-  gz[0U] = (uint64_t)1U;
+  uint64_t *gy = g + 5U;
+  uint64_t *gz = g + 10U;
+  gx[0U] = 0x2815b16f81798ULL;
+  gx[1U] = 0xdb2dce28d959fULL;
+  gx[2U] = 0xe870b07029bfcULL;
+  gx[3U] = 0xbbac55a06295cULL;
+  gx[4U] = 0x79be667ef9dcULL;
+  gy[0U] = 0x7d08ffb10d4b8ULL;
+  gy[1U] = 0x48a68554199c4ULL;
+  gy[2U] = 0xe1108a8fd17b4ULL;
+  gy[3U] = 0xc4655da4fbfc0ULL;
+  gy[4U] = 0x483ada7726a3ULL;
+  memset(gz, 0U, 5U * sizeof (uint64_t));
+  gz[0U] = 1ULL;
   uint64_t r1234[16U] = { 0U };
   uint64_t q1234[60U] = { 0U };
   uint64_t *r1 = r1234;
-  uint64_t *r2 = r1234 + (uint32_t)4U;
-  uint64_t *r3 = r1234 + (uint32_t)8U;
-  uint64_t *r4 = r1234 + (uint32_t)12U;
+  uint64_t *r2 = r1234 + 4U;
+  uint64_t *r3 = r1234 + 8U;
+  uint64_t *r4 = r1234 + 12U;
   uint64_t *q1 = q1234;
-  uint64_t *q2 = q1234 + (uint32_t)15U;
-  uint64_t *q3 = q1234 + (uint32_t)30U;
-  uint64_t *q4 = q1234 + (uint32_t)45U;
+  uint64_t *q2 = q1234 + 15U;
+  uint64_t *q3 = q1234 + 30U;
+  uint64_t *q4 = q1234 + 45U;
   __bool_bool scrut0 = ecmult_endo_split(r1, r2, q1, q2, scalar1, g);
   bool is_high10 = scrut0.fst;
   bool is_high20 = scrut0.snd;
@@ -1615,30 +1564,30 @@ Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(
   uint8_t *nonce
 )
 {
-  uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
-  KRML_HOST_IGNORE(oneq);
+  uint64_t oneq[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
+  KRML_MAYBE_UNUSED_VAR(oneq);
   uint64_t rsdk_q[16U] = { 0U };
   uint64_t *r_q = rsdk_q;
-  uint64_t *s_q = rsdk_q + (uint32_t)4U;
-  uint64_t *d_a = rsdk_q + (uint32_t)8U;
-  uint64_t *k_q = rsdk_q + (uint32_t)12U;
+  uint64_t *s_q = rsdk_q + 4U;
+  uint64_t *d_a = rsdk_q + 8U;
+  uint64_t *k_q = rsdk_q + 12U;
   uint64_t is_b_valid0 = load_qelem_check(d_a, private_key);
-  uint64_t oneq10[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq10[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = d_a;
     uint64_t uu____0 = oneq10[i];
     uint64_t x = uu____0 ^ (is_b_valid0 & (d_a[i] ^ uu____0));
     os[i] = x;);
   uint64_t is_sk_valid = is_b_valid0;
   uint64_t is_b_valid = load_qelem_check(k_q, nonce);
-  uint64_t oneq1[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq1[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = k_q;
     uint64_t uu____1 = oneq1[i];
     uint64_t x = uu____1 ^ (is_b_valid & (k_q[i] ^ uu____1));
@@ -1660,11 +1609,11 @@ Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(
   qadd(s_q, z, s_q);
   qmul(s_q, kinv, s_q);
   store_qelem(signature, r_q);
-  store_qelem(signature + (uint32_t)32U, s_q);
+  store_qelem(signature + 32U, s_q);
   uint64_t is_r_zero = is_qelem_zero(r_q);
   uint64_t is_s_zero = is_qelem_zero(s_q);
   uint64_t m = are_sk_nonce_valid & (~is_r_zero & ~is_s_zero);
-  bool res = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool res = m == 0xFFFFFFFFFFFFFFFFULL;
   return res;
 }
 
@@ -1691,7 +1640,7 @@ Hacl_K256_ECDSA_ecdsa_sign_sha256(
 )
 {
   uint8_t msgHash[32U] = { 0U };
-  Hacl_Streaming_SHA2_hash_256(msg, msg_len, msgHash);
+  Hacl_Hash_SHA2_hash_256(msgHash, msg, msg_len);
   bool b = Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(signature, msgHash, private_key, nonce);
   return b;
 }
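The one-shot helper is now Hacl_Hash_SHA2_hash_256 with the 32-byte digest as the first argument, as the call above shows; the old Hacl_Streaming_SHA2_hash_256 spelling took the digest last. A minimal sketch of the new calling convention (the message bytes are an arbitrary example):

  #include "Hacl_Hash_SHA2.h"

  static void sha256_example(void)
  {
    uint8_t digest[32U] = { 0U };
    uint8_t msg[3U] = { 0x61U, 0x62U, 0x63U };  /* "abc", an arbitrary input */
    /* new argument order: output digest first, then the input buffer and its length */
    Hacl_Hash_SHA2_hash_256(digest, msg, 3U);
  }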
@@ -1713,14 +1662,14 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
 {
   uint64_t tmp[35U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *r_q = tmp + (uint32_t)15U;
-  uint64_t *s_q = tmp + (uint32_t)19U;
-  uint64_t *u1 = tmp + (uint32_t)23U;
-  uint64_t *u2 = tmp + (uint32_t)27U;
-  uint64_t *m_q = tmp + (uint32_t)31U;
+  uint64_t *r_q = tmp + 15U;
+  uint64_t *s_q = tmp + 19U;
+  uint64_t *u1 = tmp + 23U;
+  uint64_t *u2 = tmp + 27U;
+  uint64_t *m_q = tmp + 31U;
   bool is_pk_valid = load_point_vartime(pk, public_key);
   bool is_r_valid = load_qelem_vartime(r_q, signature);
-  bool is_s_valid = load_qelem_vartime(s_q, signature + (uint32_t)32U);
+  bool is_s_valid = load_qelem_vartime(s_q, signature + 32U);
   bool is_rs_valid = is_r_valid && is_s_valid;
   load_qelem_modq(m_q, m);
   if (!(is_pk_valid && is_rs_valid))
@@ -1734,7 +1683,7 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
   uint64_t res[15U] = { 0U };
   point_mul_g_double_split_lambda_vartime(res, u1, u2, pk);
   uint64_t tmp1[5U] = { 0U };
-  uint64_t *pz = res + (uint32_t)10U;
+  uint64_t *pz = res + 10U;
   Hacl_K256_Field_fnormalize(tmp1, pz);
   bool b = Hacl_K256_Field_is_felem_zero_vartime(tmp1);
   if (b)
@@ -1742,7 +1691,7 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
     return false;
   }
   uint64_t *x = res;
-  uint64_t *z = res + (uint32_t)10U;
+  uint64_t *z = res + 10U;
   uint8_t r_bytes[32U] = { 0U };
   uint64_t r_fe[5U] = { 0U };
   uint64_t tmp_q[5U] = { 0U };
@@ -1756,11 +1705,11 @@ Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(uint8_t *m, uint8_t *public_key, uint8_t
     bool is_r_lt_p_m_q = Hacl_K256_Field_is_felem_lt_prime_minus_order_vartime(r_fe);
     if (is_r_lt_p_m_q)
     {
-      tmp_q[0U] = (uint64_t)0x25e8cd0364141U;
-      tmp_q[1U] = (uint64_t)0xe6af48a03bbfdU;
-      tmp_q[2U] = (uint64_t)0xffffffebaaedcU;
-      tmp_q[3U] = (uint64_t)0xfffffffffffffU;
-      tmp_q[4U] = (uint64_t)0xffffffffffffU;
+      tmp_q[0U] = 0x25e8cd0364141ULL;
+      tmp_q[1U] = 0xe6af48a03bbfdULL;
+      tmp_q[2U] = 0xffffffebaaedcULL;
+      tmp_q[3U] = 0xfffffffffffffULL;
+      tmp_q[4U] = 0xffffffffffffULL;
       Hacl_K256_Field_fadd(tmp_q, r_fe, tmp_q);
       return fmul_eq_vartime(tmp_q, z, tmp_x);
     }
@@ -1790,7 +1739,7 @@ Hacl_K256_ECDSA_ecdsa_verify_sha256(
 )
 {
   uint8_t mHash[32U] = { 0U };
-  Hacl_Streaming_SHA2_hash_256(msg, msg_len, mHash);
+  Hacl_Hash_SHA2_hash_256(mHash, msg, msg_len);
   bool b = Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(mHash, public_key, signature);
   return b;
 }
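Both hashed-message entry points take a caller-supplied 32-byte digest, so a sign/verify round trip can be sketched directly from the calls visible in these hunks; the buffer sizes follow the surrounding code (64-byte R || S signature, 32-byte scalars, 64-byte raw public key), and the header name is the library's assumed public include:

  #include <stdbool.h>
  #include <stdint.h>
  #include "Hacl_K256_ECDSA.h"  /* assumed public header declaring the prototypes used below */

  static bool sign_verify_roundtrip(
    uint8_t *msgHash,     /* 32-byte digest, e.g. from Hacl_Hash_SHA2_hash_256 */
    uint8_t *private_key, /* 32 bytes, must be a valid non-zero scalar */
    uint8_t *nonce,       /* 32 bytes, must be fresh, secret and a valid scalar */
    uint8_t *public_key   /* 64-byte raw X || Y matching private_key */
  )
  {
    uint8_t signature[64U] = { 0U };  /* R || S */
    if (!Hacl_K256_ECDSA_ecdsa_sign_hashed_msg(signature, msgHash, private_key, nonce))
    {
      return false;
    }
    return Hacl_K256_ECDSA_ecdsa_verify_hashed_msg(msgHash, public_key, signature);
  }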
@@ -1805,7 +1754,7 @@ Compute canonical lowest S value for `signature` (R || S).
 bool Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(uint8_t *signature)
 {
   uint64_t s_q[4U] = { 0U };
-  uint8_t *s = signature + (uint32_t)32U;
+  uint8_t *s = signature + 32U;
   bool is_sk_valid = load_qelem_vartime(s_q, s);
   if (!is_sk_valid)
   {
@@ -1813,7 +1762,7 @@ bool Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(uint8_t *signature)
   }
   bool is_sk_lt_q_halved = is_qelem_le_q_halved_vartime(s_q);
   qnegate_conditional_vartime(s_q, !is_sk_lt_q_halved);
-  store_qelem(signature + (uint32_t)32U, s_q);
+  store_qelem(signature + 32U, s_q);
   return true;
 }
 
@@ -1827,7 +1776,7 @@ Check whether `signature` (R || S) is in canonical form.
 bool Hacl_K256_ECDSA_secp256k1_ecdsa_is_signature_normalized(uint8_t *signature)
 {
   uint64_t s_q[4U] = { 0U };
-  uint8_t *s = signature + (uint32_t)32U;
+  uint8_t *s = signature + 32U;
   bool is_s_valid = load_qelem_vartime(s_q, s);
   bool is_s_lt_q_halved = is_qelem_le_q_halved_vartime(s_q);
   return is_s_valid && is_s_lt_q_halved;
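Since both helpers above operate in place on the 64-byte R || S encoding (assuming the same Hacl_K256_ECDSA.h declarations are in scope), a caller that wants canonical low-S signatures can normalize and then confirm:

  static bool make_signature_canonical(uint8_t *signature /* 64 bytes, R || S */)
  {
    /* rewrites S in place with q - S when S is in the upper half of the group order;
       fails only when S is zero or not below q */
    if (!Hacl_K256_ECDSA_secp256k1_ecdsa_signature_normalize(signature))
    {
      return false;
    }
    return Hacl_K256_ECDSA_secp256k1_ecdsa_is_signature_normalized(signature);
  }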
@@ -1886,7 +1835,7 @@ Hacl_K256_ECDSA_secp256k1_ecdsa_sign_sha256(
 )
 {
   uint8_t msgHash[32U] = { 0U };
-  Hacl_Streaming_SHA2_hash_256(msg, msg_len, msgHash);
+  Hacl_Hash_SHA2_hash_256(msgHash, msg, msg_len);
   bool
   b = Hacl_K256_ECDSA_secp256k1_ecdsa_sign_hashed_msg(signature, msgHash, private_key, nonce);
   return b;
@@ -1940,7 +1889,7 @@ Hacl_K256_ECDSA_secp256k1_ecdsa_verify_sha256(
 )
 {
   uint8_t mHash[32U] = { 0U };
-  Hacl_Streaming_SHA2_hash_256(msg, msg_len, mHash);
+  Hacl_Hash_SHA2_hash_256(mHash, msg, msg_len);
   bool b = Hacl_K256_ECDSA_secp256k1_ecdsa_verify_hashed_msg(mHash, public_key, signature);
   return b;
 }
@@ -1971,11 +1920,11 @@ Convert a public key from uncompressed to its raw form.
 bool Hacl_K256_ECDSA_public_key_uncompressed_to_raw(uint8_t *pk_raw, uint8_t *pk)
 {
   uint8_t pk0 = pk[0U];
-  if (pk0 != (uint8_t)0x04U)
+  if (pk0 != 0x04U)
   {
     return false;
   }
-  memcpy(pk_raw, pk + (uint32_t)1U, (uint32_t)64U * sizeof (uint8_t));
+  memcpy(pk_raw, pk + 1U, 64U * sizeof (uint8_t));
   return true;
 }
 
@@ -1989,8 +1938,8 @@ Convert a public key from raw to its uncompressed form.
 */
 void Hacl_K256_ECDSA_public_key_uncompressed_from_raw(uint8_t *pk, uint8_t *pk_raw)
 {
-  pk[0U] = (uint8_t)0x04U;
-  memcpy(pk + (uint32_t)1U, pk_raw, (uint32_t)64U * sizeof (uint8_t));
+  pk[0U] = 0x04U;
+  memcpy(pk + 1U, pk_raw, 64U * sizeof (uint8_t));
 }
 
 /**
@@ -2007,12 +1956,12 @@ bool Hacl_K256_ECDSA_public_key_compressed_to_raw(uint8_t *pk_raw, uint8_t *pk)
 {
   uint64_t xa[5U] = { 0U };
   uint64_t ya[5U] = { 0U };
-  uint8_t *pk_xb = pk + (uint32_t)1U;
+  uint8_t *pk_xb = pk + 1U;
   bool b = aff_point_decompress_vartime(xa, ya, pk);
   if (b)
   {
-    memcpy(pk_raw, pk_xb, (uint32_t)32U * sizeof (uint8_t));
-    Hacl_K256_Field_store_felem(pk_raw + (uint32_t)32U, ya);
+    memcpy(pk_raw, pk_xb, 32U * sizeof (uint8_t));
+    Hacl_K256_Field_store_felem(pk_raw + 32U, ya);
   }
   return b;
 }
@@ -2028,20 +1977,20 @@ Convert a public key from raw to its compressed form.
 void Hacl_K256_ECDSA_public_key_compressed_from_raw(uint8_t *pk, uint8_t *pk_raw)
 {
   uint8_t *pk_x = pk_raw;
-  uint8_t *pk_y = pk_raw + (uint32_t)32U;
+  uint8_t *pk_y = pk_raw + 32U;
   uint8_t x0 = pk_y[31U];
-  bool is_pk_y_odd = (x0 & (uint8_t)1U) == (uint8_t)1U;
+  bool is_pk_y_odd = ((uint32_t)x0 & 1U) == 1U;
   uint8_t ite;
   if (is_pk_y_odd)
   {
-    ite = (uint8_t)0x03U;
+    ite = 0x03U;
   }
   else
   {
-    ite = (uint8_t)0x02U;
+    ite = 0x02U;
   }
   pk[0U] = ite;
-  memcpy(pk + (uint32_t)1U, pk_x, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(pk + 1U, pk_x, 32U * sizeof (uint8_t));
 }
 
 
@@ -2084,7 +2033,7 @@ bool Hacl_K256_ECDSA_is_private_key_valid(uint8_t *private_key)
 {
   uint64_t s_q[4U] = { 0U };
   uint64_t res = load_qelem_check(s_q, private_key);
-  return res == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return res == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 
@@ -2107,13 +2056,13 @@ bool Hacl_K256_ECDSA_secret_to_public(uint8_t *public_key, uint8_t *private_key)
 {
   uint64_t tmp[19U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *sk = tmp + (uint32_t)15U;
+  uint64_t *sk = tmp + 15U;
   uint64_t is_b_valid = load_qelem_check(sk, private_key);
-  uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -2121,7 +2070,7 @@ bool Hacl_K256_ECDSA_secret_to_public(uint8_t *public_key, uint8_t *private_key)
   uint64_t is_sk_valid = is_b_valid;
   point_mul_g(pk, sk);
   Hacl_Impl_K256_Point_point_store(public_key, pk);
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -2140,15 +2089,15 @@ bool Hacl_K256_ECDSA_ecdh(uint8_t *shared_secret, uint8_t *their_pubkey, uint8_t
 {
   uint64_t tmp[34U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *ss = tmp + (uint32_t)15U;
-  uint64_t *sk = tmp + (uint32_t)30U;
+  uint64_t *ss = tmp + 15U;
+  uint64_t *sk = tmp + 30U;
   bool is_pk_valid = load_point_vartime(pk, their_pubkey);
   uint64_t is_b_valid = load_qelem_check(sk, private_key);
-  uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U };
+  uint64_t oneq[4U] = { 0x1ULL, 0x0ULL, 0x0ULL, 0x0ULL };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -2159,6 +2108,6 @@ bool Hacl_K256_ECDSA_ecdh(uint8_t *shared_secret, uint8_t *their_pubkey, uint8_t
     Hacl_Impl_K256_PointMul_point_mul(ss, sk, pk);
     Hacl_Impl_K256_Point_point_store(shared_secret, ss);
   }
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU && is_pk_valid;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL && is_pk_valid;
 }
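
A minimal usage sketch for the secp256k1 key-handling entry points above (not taken from the patch; it assumes these declarations are exposed through Hacl_K256_ECDSA.h and that a raw public key is the 64-byte x||y encoding consumed by the conversion helpers shown in the hunks above):

#include <stdbool.h>
#include <stdint.h>
#include "Hacl_K256_ECDSA.h"

/* Derive the raw public key for a 32-byte private key and emit its 33-byte
   compressed encoding; returns false when the private key is out of range. */
static bool derive_compressed_pubkey(uint8_t pk_comp[33U], uint8_t private_key[32U])
{
  uint8_t pk_raw[64U] = { 0U };
  if (!Hacl_K256_ECDSA_is_private_key_valid(private_key))
  {
    return false;
  }
  bool ok = Hacl_K256_ECDSA_secret_to_public(pk_raw, private_key);
  Hacl_K256_ECDSA_public_key_compressed_from_raw(pk_comp, pk_raw);
  return ok;
}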
 
diff --git a/src/msvc/Hacl_MAC_Poly1305.c b/src/msvc/Hacl_MAC_Poly1305.c
new file mode 100644
index 00000000..28cbca5a
--- /dev/null
+++ b/src/msvc/Hacl_MAC_Poly1305.c
@@ -0,0 +1,712 @@
+/* MIT License
+ *
+ * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
+ * Copyright (c) 2022-2023 HACL* Contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include "internal/Hacl_MAC_Poly1305.h"
+
+void Hacl_MAC_Poly1305_poly1305_init(uint64_t *ctx, uint8_t *key)
+{
+  uint64_t *acc = ctx;
+  uint64_t *pre = ctx + 5U;
+  uint8_t *kr = key;
+  acc[0U] = 0ULL;
+  acc[1U] = 0ULL;
+  acc[2U] = 0ULL;
+  acc[3U] = 0ULL;
+  acc[4U] = 0ULL;
+  uint64_t u0 = load64_le(kr);
+  uint64_t lo = u0;
+  uint64_t u = load64_le(kr + 8U);
+  uint64_t hi = u;
+  uint64_t mask0 = 0x0ffffffc0fffffffULL;
+  uint64_t mask1 = 0x0ffffffc0ffffffcULL;
+  uint64_t lo1 = lo & mask0;
+  uint64_t hi1 = hi & mask1;
+  uint64_t *r = pre;
+  uint64_t *r5 = pre + 5U;
+  uint64_t *rn = pre + 10U;
+  uint64_t *rn_5 = pre + 15U;
+  uint64_t r_vec0 = lo1;
+  uint64_t r_vec1 = hi1;
+  uint64_t f00 = r_vec0 & 0x3ffffffULL;
+  uint64_t f10 = r_vec0 >> 26U & 0x3ffffffULL;
+  uint64_t f20 = r_vec0 >> 52U | (r_vec1 & 0x3fffULL) << 12U;
+  uint64_t f30 = r_vec1 >> 14U & 0x3ffffffULL;
+  uint64_t f40 = r_vec1 >> 40U;
+  uint64_t f0 = f00;
+  uint64_t f1 = f10;
+  uint64_t f2 = f20;
+  uint64_t f3 = f30;
+  uint64_t f4 = f40;
+  r[0U] = f0;
+  r[1U] = f1;
+  r[2U] = f2;
+  r[3U] = f3;
+  r[4U] = f4;
+  uint64_t f200 = r[0U];
+  uint64_t f21 = r[1U];
+  uint64_t f22 = r[2U];
+  uint64_t f23 = r[3U];
+  uint64_t f24 = r[4U];
+  r5[0U] = f200 * 5ULL;
+  r5[1U] = f21 * 5ULL;
+  r5[2U] = f22 * 5ULL;
+  r5[3U] = f23 * 5ULL;
+  r5[4U] = f24 * 5ULL;
+  rn[0U] = r[0U];
+  rn[1U] = r[1U];
+  rn[2U] = r[2U];
+  rn[3U] = r[3U];
+  rn[4U] = r[4U];
+  rn_5[0U] = r5[0U];
+  rn_5[1U] = r5[1U];
+  rn_5[2U] = r5[2U];
+  rn_5[3U] = r5[3U];
+  rn_5[4U] = r5[4U];
+}
+
+static void poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
+{
+  uint64_t *pre = ctx + 5U;
+  uint64_t *acc = ctx;
+  uint32_t nb = len / 16U;
+  uint32_t rem = len % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
+  {
+    uint8_t *block = text + i * 16U;
+    uint64_t e[5U] = { 0U };
+    uint64_t u0 = load64_le(block);
+    uint64_t lo = u0;
+    uint64_t u = load64_le(block + 8U);
+    uint64_t hi = u;
+    uint64_t f0 = lo;
+    uint64_t f1 = hi;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
+    uint64_t f01 = f010;
+    uint64_t f111 = f110;
+    uint64_t f2 = f20;
+    uint64_t f3 = f30;
+    uint64_t f41 = f40;
+    e[0U] = f01;
+    e[1U] = f111;
+    e[2U] = f2;
+    e[3U] = f3;
+    e[4U] = f41;
+    uint64_t b = 0x1000000ULL;
+    uint64_t mask = b;
+    uint64_t f4 = e[4U];
+    e[4U] = f4 | mask;
+    uint64_t *r = pre;
+    uint64_t *r5 = pre + 5U;
+    uint64_t r0 = r[0U];
+    uint64_t r1 = r[1U];
+    uint64_t r2 = r[2U];
+    uint64_t r3 = r[3U];
+    uint64_t r4 = r[4U];
+    uint64_t r51 = r5[1U];
+    uint64_t r52 = r5[2U];
+    uint64_t r53 = r5[3U];
+    uint64_t r54 = r5[4U];
+    uint64_t f10 = e[0U];
+    uint64_t f11 = e[1U];
+    uint64_t f12 = e[2U];
+    uint64_t f13 = e[3U];
+    uint64_t f14 = e[4U];
+    uint64_t a0 = acc[0U];
+    uint64_t a1 = acc[1U];
+    uint64_t a2 = acc[2U];
+    uint64_t a3 = acc[3U];
+    uint64_t a4 = acc[4U];
+    uint64_t a01 = a0 + f10;
+    uint64_t a11 = a1 + f11;
+    uint64_t a21 = a2 + f12;
+    uint64_t a31 = a3 + f13;
+    uint64_t a41 = a4 + f14;
+    uint64_t a02 = r0 * a01;
+    uint64_t a12 = r1 * a01;
+    uint64_t a22 = r2 * a01;
+    uint64_t a32 = r3 * a01;
+    uint64_t a42 = r4 * a01;
+    uint64_t a03 = a02 + r54 * a11;
+    uint64_t a13 = a12 + r0 * a11;
+    uint64_t a23 = a22 + r1 * a11;
+    uint64_t a33 = a32 + r2 * a11;
+    uint64_t a43 = a42 + r3 * a11;
+    uint64_t a04 = a03 + r53 * a21;
+    uint64_t a14 = a13 + r54 * a21;
+    uint64_t a24 = a23 + r0 * a21;
+    uint64_t a34 = a33 + r1 * a21;
+    uint64_t a44 = a43 + r2 * a21;
+    uint64_t a05 = a04 + r52 * a31;
+    uint64_t a15 = a14 + r53 * a31;
+    uint64_t a25 = a24 + r54 * a31;
+    uint64_t a35 = a34 + r0 * a31;
+    uint64_t a45 = a44 + r1 * a31;
+    uint64_t a06 = a05 + r51 * a41;
+    uint64_t a16 = a15 + r52 * a41;
+    uint64_t a26 = a25 + r53 * a41;
+    uint64_t a36 = a35 + r54 * a41;
+    uint64_t a46 = a45 + r0 * a41;
+    uint64_t t0 = a06;
+    uint64_t t1 = a16;
+    uint64_t t2 = a26;
+    uint64_t t3 = a36;
+    uint64_t t4 = a46;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
+    uint64_t x0 = t0 & mask26;
+    uint64_t x3 = t3 & mask26;
+    uint64_t x1 = t1 + z0;
+    uint64_t x4 = t4 + z1;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
+    uint64_t z12 = z11 + t;
+    uint64_t x11 = x1 & mask26;
+    uint64_t x41 = x4 & mask26;
+    uint64_t x2 = t2 + z01;
+    uint64_t x01 = x0 + z12;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
+    uint64_t x21 = x2 & mask26;
+    uint64_t x02 = x01 & mask26;
+    uint64_t x31 = x3 + z02;
+    uint64_t x12 = x11 + z13;
+    uint64_t z03 = x31 >> 26U;
+    uint64_t x32 = x31 & mask26;
+    uint64_t x42 = x41 + z03;
+    uint64_t o0 = x02;
+    uint64_t o1 = x12;
+    uint64_t o2 = x21;
+    uint64_t o3 = x32;
+    uint64_t o4 = x42;
+    acc[0U] = o0;
+    acc[1U] = o1;
+    acc[2U] = o2;
+    acc[3U] = o3;
+    acc[4U] = o4;
+  }
+  if (rem > 0U)
+  {
+    uint8_t *last = text + nb * 16U;
+    uint64_t e[5U] = { 0U };
+    uint8_t tmp[16U] = { 0U };
+    memcpy(tmp, last, rem * sizeof (uint8_t));
+    uint64_t u0 = load64_le(tmp);
+    uint64_t lo = u0;
+    uint64_t u = load64_le(tmp + 8U);
+    uint64_t hi = u;
+    uint64_t f0 = lo;
+    uint64_t f1 = hi;
+    uint64_t f010 = f0 & 0x3ffffffULL;
+    uint64_t f110 = f0 >> 26U & 0x3ffffffULL;
+    uint64_t f20 = f0 >> 52U | (f1 & 0x3fffULL) << 12U;
+    uint64_t f30 = f1 >> 14U & 0x3ffffffULL;
+    uint64_t f40 = f1 >> 40U;
+    uint64_t f01 = f010;
+    uint64_t f111 = f110;
+    uint64_t f2 = f20;
+    uint64_t f3 = f30;
+    uint64_t f4 = f40;
+    e[0U] = f01;
+    e[1U] = f111;
+    e[2U] = f2;
+    e[3U] = f3;
+    e[4U] = f4;
+    uint64_t b = 1ULL << rem * 8U % 26U;
+    uint64_t mask = b;
+    uint64_t fi = e[rem * 8U / 26U];
+    e[rem * 8U / 26U] = fi | mask;
+    uint64_t *r = pre;
+    uint64_t *r5 = pre + 5U;
+    uint64_t r0 = r[0U];
+    uint64_t r1 = r[1U];
+    uint64_t r2 = r[2U];
+    uint64_t r3 = r[3U];
+    uint64_t r4 = r[4U];
+    uint64_t r51 = r5[1U];
+    uint64_t r52 = r5[2U];
+    uint64_t r53 = r5[3U];
+    uint64_t r54 = r5[4U];
+    uint64_t f10 = e[0U];
+    uint64_t f11 = e[1U];
+    uint64_t f12 = e[2U];
+    uint64_t f13 = e[3U];
+    uint64_t f14 = e[4U];
+    uint64_t a0 = acc[0U];
+    uint64_t a1 = acc[1U];
+    uint64_t a2 = acc[2U];
+    uint64_t a3 = acc[3U];
+    uint64_t a4 = acc[4U];
+    uint64_t a01 = a0 + f10;
+    uint64_t a11 = a1 + f11;
+    uint64_t a21 = a2 + f12;
+    uint64_t a31 = a3 + f13;
+    uint64_t a41 = a4 + f14;
+    uint64_t a02 = r0 * a01;
+    uint64_t a12 = r1 * a01;
+    uint64_t a22 = r2 * a01;
+    uint64_t a32 = r3 * a01;
+    uint64_t a42 = r4 * a01;
+    uint64_t a03 = a02 + r54 * a11;
+    uint64_t a13 = a12 + r0 * a11;
+    uint64_t a23 = a22 + r1 * a11;
+    uint64_t a33 = a32 + r2 * a11;
+    uint64_t a43 = a42 + r3 * a11;
+    uint64_t a04 = a03 + r53 * a21;
+    uint64_t a14 = a13 + r54 * a21;
+    uint64_t a24 = a23 + r0 * a21;
+    uint64_t a34 = a33 + r1 * a21;
+    uint64_t a44 = a43 + r2 * a21;
+    uint64_t a05 = a04 + r52 * a31;
+    uint64_t a15 = a14 + r53 * a31;
+    uint64_t a25 = a24 + r54 * a31;
+    uint64_t a35 = a34 + r0 * a31;
+    uint64_t a45 = a44 + r1 * a31;
+    uint64_t a06 = a05 + r51 * a41;
+    uint64_t a16 = a15 + r52 * a41;
+    uint64_t a26 = a25 + r53 * a41;
+    uint64_t a36 = a35 + r54 * a41;
+    uint64_t a46 = a45 + r0 * a41;
+    uint64_t t0 = a06;
+    uint64_t t1 = a16;
+    uint64_t t2 = a26;
+    uint64_t t3 = a36;
+    uint64_t t4 = a46;
+    uint64_t mask26 = 0x3ffffffULL;
+    uint64_t z0 = t0 >> 26U;
+    uint64_t z1 = t3 >> 26U;
+    uint64_t x0 = t0 & mask26;
+    uint64_t x3 = t3 & mask26;
+    uint64_t x1 = t1 + z0;
+    uint64_t x4 = t4 + z1;
+    uint64_t z01 = x1 >> 26U;
+    uint64_t z11 = x4 >> 26U;
+    uint64_t t = z11 << 2U;
+    uint64_t z12 = z11 + t;
+    uint64_t x11 = x1 & mask26;
+    uint64_t x41 = x4 & mask26;
+    uint64_t x2 = t2 + z01;
+    uint64_t x01 = x0 + z12;
+    uint64_t z02 = x2 >> 26U;
+    uint64_t z13 = x01 >> 26U;
+    uint64_t x21 = x2 & mask26;
+    uint64_t x02 = x01 & mask26;
+    uint64_t x31 = x3 + z02;
+    uint64_t x12 = x11 + z13;
+    uint64_t z03 = x31 >> 26U;
+    uint64_t x32 = x31 & mask26;
+    uint64_t x42 = x41 + z03;
+    uint64_t o0 = x02;
+    uint64_t o1 = x12;
+    uint64_t o2 = x21;
+    uint64_t o3 = x32;
+    uint64_t o4 = x42;
+    acc[0U] = o0;
+    acc[1U] = o1;
+    acc[2U] = o2;
+    acc[3U] = o3;
+    acc[4U] = o4;
+    return;
+  }
+}
+
+void Hacl_MAC_Poly1305_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx)
+{
+  uint64_t *acc = ctx;
+  uint8_t *ks = key + 16U;
+  uint64_t f0 = acc[0U];
+  uint64_t f13 = acc[1U];
+  uint64_t f23 = acc[2U];
+  uint64_t f33 = acc[3U];
+  uint64_t f40 = acc[4U];
+  uint64_t l0 = f0 + 0ULL;
+  uint64_t tmp00 = l0 & 0x3ffffffULL;
+  uint64_t c00 = l0 >> 26U;
+  uint64_t l1 = f13 + c00;
+  uint64_t tmp10 = l1 & 0x3ffffffULL;
+  uint64_t c10 = l1 >> 26U;
+  uint64_t l2 = f23 + c10;
+  uint64_t tmp20 = l2 & 0x3ffffffULL;
+  uint64_t c20 = l2 >> 26U;
+  uint64_t l3 = f33 + c20;
+  uint64_t tmp30 = l3 & 0x3ffffffULL;
+  uint64_t c30 = l3 >> 26U;
+  uint64_t l4 = f40 + c30;
+  uint64_t tmp40 = l4 & 0x3ffffffULL;
+  uint64_t c40 = l4 >> 26U;
+  uint64_t f010 = tmp00 + c40 * 5ULL;
+  uint64_t f110 = tmp10;
+  uint64_t f210 = tmp20;
+  uint64_t f310 = tmp30;
+  uint64_t f410 = tmp40;
+  uint64_t l = f010 + 0ULL;
+  uint64_t tmp0 = l & 0x3ffffffULL;
+  uint64_t c0 = l >> 26U;
+  uint64_t l5 = f110 + c0;
+  uint64_t tmp1 = l5 & 0x3ffffffULL;
+  uint64_t c1 = l5 >> 26U;
+  uint64_t l6 = f210 + c1;
+  uint64_t tmp2 = l6 & 0x3ffffffULL;
+  uint64_t c2 = l6 >> 26U;
+  uint64_t l7 = f310 + c2;
+  uint64_t tmp3 = l7 & 0x3ffffffULL;
+  uint64_t c3 = l7 >> 26U;
+  uint64_t l8 = f410 + c3;
+  uint64_t tmp4 = l8 & 0x3ffffffULL;
+  uint64_t c4 = l8 >> 26U;
+  uint64_t f02 = tmp0 + c4 * 5ULL;
+  uint64_t f12 = tmp1;
+  uint64_t f22 = tmp2;
+  uint64_t f32 = tmp3;
+  uint64_t f42 = tmp4;
+  uint64_t mh = 0x3ffffffULL;
+  uint64_t ml = 0x3fffffbULL;
+  uint64_t mask = FStar_UInt64_eq_mask(f42, mh);
+  uint64_t mask1 = mask & FStar_UInt64_eq_mask(f32, mh);
+  uint64_t mask2 = mask1 & FStar_UInt64_eq_mask(f22, mh);
+  uint64_t mask3 = mask2 & FStar_UInt64_eq_mask(f12, mh);
+  uint64_t mask4 = mask3 & ~~FStar_UInt64_gte_mask(f02, ml);
+  uint64_t ph = mask4 & mh;
+  uint64_t pl = mask4 & ml;
+  uint64_t o0 = f02 - pl;
+  uint64_t o1 = f12 - ph;
+  uint64_t o2 = f22 - ph;
+  uint64_t o3 = f32 - ph;
+  uint64_t o4 = f42 - ph;
+  uint64_t f011 = o0;
+  uint64_t f111 = o1;
+  uint64_t f211 = o2;
+  uint64_t f311 = o3;
+  uint64_t f411 = o4;
+  acc[0U] = f011;
+  acc[1U] = f111;
+  acc[2U] = f211;
+  acc[3U] = f311;
+  acc[4U] = f411;
+  uint64_t f00 = acc[0U];
+  uint64_t f1 = acc[1U];
+  uint64_t f2 = acc[2U];
+  uint64_t f3 = acc[3U];
+  uint64_t f4 = acc[4U];
+  uint64_t f01 = f00;
+  uint64_t f112 = f1;
+  uint64_t f212 = f2;
+  uint64_t f312 = f3;
+  uint64_t f41 = f4;
+  uint64_t lo = (f01 | f112 << 26U) | f212 << 52U;
+  uint64_t hi = (f212 >> 12U | f312 << 14U) | f41 << 40U;
+  uint64_t f10 = lo;
+  uint64_t f11 = hi;
+  uint64_t u0 = load64_le(ks);
+  uint64_t lo0 = u0;
+  uint64_t u = load64_le(ks + 8U);
+  uint64_t hi0 = u;
+  uint64_t f20 = lo0;
+  uint64_t f21 = hi0;
+  uint64_t r0 = f10 + f20;
+  uint64_t r1 = f11 + f21;
+  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> 63U;
+  uint64_t r11 = r1 + c;
+  uint64_t f30 = r0;
+  uint64_t f31 = r11;
+  store64_le(tag, f30);
+  store64_le(tag + 8U, f31);
+}
+
+Hacl_MAC_Poly1305_state_t *Hacl_MAC_Poly1305_malloc(uint8_t *key)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(16U, sizeof (uint8_t));
+  uint64_t *r1 = (uint64_t *)KRML_HOST_CALLOC(25U, sizeof (uint64_t));
+  uint64_t *block_state = r1;
+  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  memcpy(k_, key, 32U * sizeof (uint8_t));
+  uint8_t *k_0 = k_;
+  Hacl_MAC_Poly1305_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_0 };
+  Hacl_MAC_Poly1305_state_t
+  *p = (Hacl_MAC_Poly1305_state_t *)KRML_HOST_MALLOC(sizeof (Hacl_MAC_Poly1305_state_t));
+  p[0U] = s;
+  Hacl_MAC_Poly1305_poly1305_init(block_state, key);
+  return p;
+}
+
+void Hacl_MAC_Poly1305_reset(Hacl_MAC_Poly1305_state_t *state, uint8_t *key)
+{
+  Hacl_MAC_Poly1305_state_t scrut = *state;
+  uint8_t *k_ = scrut.p_key;
+  uint8_t *buf = scrut.buf;
+  uint64_t *block_state = scrut.block_state;
+  Hacl_MAC_Poly1305_poly1305_init(block_state, key);
+  memcpy(k_, key, 32U * sizeof (uint8_t));
+  uint8_t *k_1 = k_;
+  Hacl_MAC_Poly1305_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 };
+  state[0U] = tmp;
+}
+
+/**
+0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_MAC_Poly1305_update(Hacl_MAC_Poly1305_state_t *state, uint8_t *chunk, uint32_t chunk_len)
+{
+  Hacl_MAC_Poly1305_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)16U == 0ULL && total_len > 0ULL)
+  {
+    sz = 16U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)16U);
+  }
+  if (chunk_len <= 16U - sz)
+  {
+    Hacl_MAC_Poly1305_state_t s1 = *state;
+    uint64_t *block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 16U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)16U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2,
+          .p_key = k_1
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_MAC_Poly1305_state_t s1 = *state;
+    uint64_t *block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 16U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)16U);
+    }
+    if (!(sz1 == 0U))
+    {
+      poly1305_update(block_state1, 16U, buf);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)16U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 16U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)16U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 16U;
+    uint32_t data1_len = n_blocks * 16U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    poly1305_update(block_state1, data1_len, data1);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len,
+          .p_key = k_1
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 16U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_MAC_Poly1305_state_t s1 = *state;
+    uint64_t *block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)16U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 16U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)16U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2,
+          .p_key = k_1
+        }
+      );
+    Hacl_MAC_Poly1305_state_t s10 = *state;
+    uint64_t *block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint8_t *k_10 = s10.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)16U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 16U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)16U);
+    }
+    if (!(sz1 == 0U))
+    {
+      poly1305_update(block_state1, 16U, buf);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)16U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 16U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)16U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 16U;
+    uint32_t data1_len = n_blocks * 16U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    poly1305_update(block_state1, data1_len, data1);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff),
+          .p_key = k_10
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
+
+void Hacl_MAC_Poly1305_digest(Hacl_MAC_Poly1305_state_t *state, uint8_t *output)
+{
+  Hacl_MAC_Poly1305_state_t scrut = *state;
+  uint64_t *block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint8_t *k_ = scrut.p_key;
+  uint32_t r;
+  if (total_len % (uint64_t)16U == 0ULL && total_len > 0ULL)
+  {
+    r = 16U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)16U);
+  }
+  uint8_t *buf_1 = buf_;
+  uint64_t r1[25U] = { 0U };
+  uint64_t *tmp_block_state = r1;
+  memcpy(tmp_block_state, block_state, 25U * sizeof (uint64_t));
+  uint32_t ite;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite = 16U;
+  }
+  else
+  {
+    ite = r % 16U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite;
+  uint8_t *buf_multi = buf_1;
+  poly1305_update(tmp_block_state, 0U, buf_multi);
+  poly1305_update(tmp_block_state, r, buf_last);
+  uint64_t tmp[25U] = { 0U };
+  memcpy(tmp, tmp_block_state, 25U * sizeof (uint64_t));
+  Hacl_MAC_Poly1305_poly1305_finish(output, k_, tmp);
+}
+
+void Hacl_MAC_Poly1305_free(Hacl_MAC_Poly1305_state_t *state)
+{
+  Hacl_MAC_Poly1305_state_t scrut = *state;
+  uint8_t *k_ = scrut.p_key;
+  uint8_t *buf = scrut.buf;
+  uint64_t *block_state = scrut.block_state;
+  KRML_HOST_FREE(k_);
+  KRML_HOST_FREE(block_state);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
+
+void Hacl_MAC_Poly1305_mac(uint8_t *output, uint8_t *input, uint32_t input_len, uint8_t *key)
+{
+  uint64_t ctx[25U] = { 0U };
+  Hacl_MAC_Poly1305_poly1305_init(ctx, key);
+  poly1305_update(ctx, input_len, input);
+  Hacl_MAC_Poly1305_poly1305_finish(output, key, ctx);
+}
+
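
A minimal usage sketch for the one-shot and streaming Poly1305 entry points defined above (not taken from the patch; it assumes the public declarations live in Hacl_MAC_Poly1305.h and that `msg`/`msg_len` are caller-provided):

#include <stdint.h>
#include "Hacl_MAC_Poly1305.h"

/* Compute the 16-byte tag of `msg` under a 32-byte `key`, once with the
   one-shot entry point and once with the streaming API; both paths should
   produce the same tag. */
static void poly1305_tag_both_ways(uint8_t tag_oneshot[16U],
                                   uint8_t tag_streaming[16U],
                                   uint8_t key[32U],
                                   uint8_t *msg, uint32_t msg_len)
{
  /* One-shot: init, update and finish in a single call. */
  Hacl_MAC_Poly1305_mac(tag_oneshot, msg, msg_len, key);

  /* Streaming: the message may be fed in several chunks; here a single one. */
  Hacl_MAC_Poly1305_state_t *st = Hacl_MAC_Poly1305_malloc(key);
  Hacl_Streaming_Types_error_code err = Hacl_MAC_Poly1305_update(st, msg, msg_len);
  if (err == Hacl_Streaming_Types_Success)
  {
    Hacl_MAC_Poly1305_digest(st, tag_streaming);
  }
  Hacl_MAC_Poly1305_free(st);
}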
diff --git a/src/Hacl_Poly1305_128.c b/src/msvc/Hacl_MAC_Poly1305_Simd128.c
similarity index 66%
rename from src/Hacl_Poly1305_128.c
rename to src/msvc/Hacl_MAC_Poly1305_Simd128.c
index f400fe82..17e26978 100644
--- a/src/Hacl_Poly1305_128.c
+++ b/src/msvc/Hacl_MAC_Poly1305_Simd128.c
@@ -23,40 +23,34 @@
  */
 
 
-#include "internal/Hacl_Poly1305_128.h"
+#include "internal/Hacl_MAC_Poly1305_Simd128.h"
 
-void
-Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc, uint8_t *b)
+void Hacl_MAC_Poly1305_Simd128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc, uint8_t *b)
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
   Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(b);
-  Lib_IntVector_Intrinsics_vec128
-  b2 = Lib_IntVector_Intrinsics_vec128_load64_le(b + (uint32_t)16U);
+  Lib_IntVector_Intrinsics_vec128 b2 = Lib_IntVector_Intrinsics_vec128_load64_le(b + 16U);
   Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
   Lib_IntVector_Intrinsics_vec128 hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
   Lib_IntVector_Intrinsics_vec128
   f00 =
     Lib_IntVector_Intrinsics_vec128_and(lo,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f10 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U);
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 40U);
   Lib_IntVector_Intrinsics_vec128 f02 = f00;
   Lib_IntVector_Intrinsics_vec128 f12 = f10;
   Lib_IntVector_Intrinsics_vec128 f22 = f20;
@@ -67,7 +61,7 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
   e[2U] = f22;
   e[3U] = f32;
   e[4U] = f42;
-  uint64_t b10 = (uint64_t)0x1000000U;
+  uint64_t b10 = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b10);
   Lib_IntVector_Intrinsics_vec128 f43 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec128_or(f43, mask);
@@ -81,16 +75,11 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
   Lib_IntVector_Intrinsics_vec128 e2 = e[2U];
   Lib_IntVector_Intrinsics_vec128 e3 = e[3U];
   Lib_IntVector_Intrinsics_vec128 e4 = e[4U];
-  Lib_IntVector_Intrinsics_vec128
-  f0 = Lib_IntVector_Intrinsics_vec128_insert64(acc0, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f1 = Lib_IntVector_Intrinsics_vec128_insert64(acc1, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f2 = Lib_IntVector_Intrinsics_vec128_insert64(acc2, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f3 = Lib_IntVector_Intrinsics_vec128_insert64(acc3, (uint64_t)0U, (uint32_t)1U);
-  Lib_IntVector_Intrinsics_vec128
-  f4 = Lib_IntVector_Intrinsics_vec128_insert64(acc4, (uint64_t)0U, (uint32_t)1U);
+  Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_insert64(acc0, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_insert64(acc1, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f2 = Lib_IntVector_Intrinsics_vec128_insert64(acc2, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f3 = Lib_IntVector_Intrinsics_vec128_insert64(acc3, 0ULL, 1U);
+  Lib_IntVector_Intrinsics_vec128 f4 = Lib_IntVector_Intrinsics_vec128_insert64(acc4, 0ULL, 1U);
   Lib_IntVector_Intrinsics_vec128 f01 = Lib_IntVector_Intrinsics_vec128_add64(f0, e0);
   Lib_IntVector_Intrinsics_vec128 f11 = Lib_IntVector_Intrinsics_vec128_add64(f1, e1);
   Lib_IntVector_Intrinsics_vec128 f21 = Lib_IntVector_Intrinsics_vec128_add64(f2, e2);
@@ -109,13 +98,13 @@ Hacl_Impl_Poly1305_Field32xN_128_load_acc2(Lib_IntVector_Intrinsics_vec128 *acc,
 }
 
 void
-Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
+Hacl_MAC_Poly1305_Simd128_fmul_r2_normalize(
   Lib_IntVector_Intrinsics_vec128 *out,
   Lib_IntVector_Intrinsics_vec128 *p
 )
 {
   Lib_IntVector_Intrinsics_vec128 *r = p;
-  Lib_IntVector_Intrinsics_vec128 *r2 = p + (uint32_t)10U;
+  Lib_IntVector_Intrinsics_vec128 *r2 = p + 10U;
   Lib_IntVector_Intrinsics_vec128 a0 = out[0U];
   Lib_IntVector_Intrinsics_vec128 a1 = out[1U];
   Lib_IntVector_Intrinsics_vec128 a2 = out[2U];
@@ -141,14 +130,10 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   r231 = Lib_IntVector_Intrinsics_vec128_interleave_low64(r23, r13);
   Lib_IntVector_Intrinsics_vec128
   r241 = Lib_IntVector_Intrinsics_vec128_interleave_low64(r24, r14);
-  Lib_IntVector_Intrinsics_vec128
-  r251 = Lib_IntVector_Intrinsics_vec128_smul64(r211, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec128
-  r252 = Lib_IntVector_Intrinsics_vec128_smul64(r221, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec128
-  r253 = Lib_IntVector_Intrinsics_vec128_smul64(r231, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec128
-  r254 = Lib_IntVector_Intrinsics_vec128_smul64(r241, (uint64_t)5U);
+  Lib_IntVector_Intrinsics_vec128 r251 = Lib_IntVector_Intrinsics_vec128_smul64(r211, 5ULL);
+  Lib_IntVector_Intrinsics_vec128 r252 = Lib_IntVector_Intrinsics_vec128_smul64(r221, 5ULL);
+  Lib_IntVector_Intrinsics_vec128 r253 = Lib_IntVector_Intrinsics_vec128_smul64(r231, 5ULL);
+  Lib_IntVector_Intrinsics_vec128 r254 = Lib_IntVector_Intrinsics_vec128_smul64(r241, 5ULL);
   Lib_IntVector_Intrinsics_vec128 a01 = Lib_IntVector_Intrinsics_vec128_mul64(r201, a0);
   Lib_IntVector_Intrinsics_vec128 a11 = Lib_IntVector_Intrinsics_vec128_mul64(r211, a0);
   Lib_IntVector_Intrinsics_vec128 a21 = Lib_IntVector_Intrinsics_vec128_mul64(r221, a0);
@@ -239,37 +224,28 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   Lib_IntVector_Intrinsics_vec128 t2 = a25;
   Lib_IntVector_Intrinsics_vec128 t3 = a35;
   Lib_IntVector_Intrinsics_vec128 t4 = a45;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -302,41 +278,36 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   Lib_IntVector_Intrinsics_vec128
   tmp0 =
     Lib_IntVector_Intrinsics_vec128_and(l,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec128 l0 = Lib_IntVector_Intrinsics_vec128_add64(o11, c0);
   Lib_IntVector_Intrinsics_vec128
   tmp1 =
     Lib_IntVector_Intrinsics_vec128_and(l0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(o21, c1);
   Lib_IntVector_Intrinsics_vec128
   tmp2 =
     Lib_IntVector_Intrinsics_vec128_and(l1,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(o31, c2);
   Lib_IntVector_Intrinsics_vec128
   tmp3 =
     Lib_IntVector_Intrinsics_vec128_and(l2,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(o41, c3);
   Lib_IntVector_Intrinsics_vec128
   tmp4 =
     Lib_IntVector_Intrinsics_vec128_and(l3,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec128
   o00 =
     Lib_IntVector_Intrinsics_vec128_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec128_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec128 o1 = tmp1;
   Lib_IntVector_Intrinsics_vec128 o2 = tmp2;
   Lib_IntVector_Intrinsics_vec128 o3 = tmp3;
@@ -348,10 +319,11 @@ Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(
   out[4U] = o4;
 }
 
-void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key)
+void
+Hacl_MAC_Poly1305_Simd128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *key)
 {
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   uint8_t *kr = key;
   acc[0U] = Lib_IntVector_Intrinsics_vec128_zero;
   acc[1U] = Lib_IntVector_Intrinsics_vec128_zero;
@@ -360,41 +332,38 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   acc[4U] = Lib_IntVector_Intrinsics_vec128_zero;
   uint64_t u0 = load64_le(kr);
   uint64_t lo = u0;
-  uint64_t u = load64_le(kr + (uint32_t)8U);
+  uint64_t u = load64_le(kr + 8U);
   uint64_t hi = u;
-  uint64_t mask0 = (uint64_t)0x0ffffffc0fffffffU;
-  uint64_t mask1 = (uint64_t)0x0ffffffc0ffffffcU;
+  uint64_t mask0 = 0x0ffffffc0fffffffULL;
+  uint64_t mask1 = 0x0ffffffc0ffffffcULL;
   uint64_t lo1 = lo & mask0;
   uint64_t hi1 = hi & mask1;
   Lib_IntVector_Intrinsics_vec128 *r = pre;
-  Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec128 *rn = pre + (uint32_t)10U;
-  Lib_IntVector_Intrinsics_vec128 *rn_5 = pre + (uint32_t)15U;
+  Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
+  Lib_IntVector_Intrinsics_vec128 *rn = pre + 10U;
+  Lib_IntVector_Intrinsics_vec128 *rn_5 = pre + 15U;
   Lib_IntVector_Intrinsics_vec128 r_vec0 = Lib_IntVector_Intrinsics_vec128_load64(lo1);
   Lib_IntVector_Intrinsics_vec128 r_vec1 = Lib_IntVector_Intrinsics_vec128_load64(hi1);
   Lib_IntVector_Intrinsics_vec128
   f00 =
     Lib_IntVector_Intrinsics_vec128_and(r_vec0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f15 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0, 26U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
   f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec0, 52U),
       Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(r_vec1,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec128
   f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, 14U),
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, (uint32_t)40U);
+  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(r_vec1, 40U);
   Lib_IntVector_Intrinsics_vec128 f0 = f00;
   Lib_IntVector_Intrinsics_vec128 f1 = f15;
   Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -410,11 +379,11 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   Lib_IntVector_Intrinsics_vec128 f220 = r[2U];
   Lib_IntVector_Intrinsics_vec128 f230 = r[3U];
   Lib_IntVector_Intrinsics_vec128 f240 = r[4U];
-  r5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f200, (uint64_t)5U);
-  r5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f210, (uint64_t)5U);
-  r5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f220, (uint64_t)5U);
-  r5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f230, (uint64_t)5U);
-  r5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f240, (uint64_t)5U);
+  r5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f200, 5ULL);
+  r5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f210, 5ULL);
+  r5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f220, 5ULL);
+  r5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f230, 5ULL);
+  r5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f240, 5ULL);
   Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
   Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -511,37 +480,28 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   Lib_IntVector_Intrinsics_vec128 t2 = a24;
   Lib_IntVector_Intrinsics_vec128 t3 = a34;
   Lib_IntVector_Intrinsics_vec128 t4 = a44;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
   Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -559,275 +519,56 @@ void Hacl_Poly1305_128_poly1305_init(Lib_IntVector_Intrinsics_vec128 *ctx, uint8
   Lib_IntVector_Intrinsics_vec128 f22 = rn[2U];
   Lib_IntVector_Intrinsics_vec128 f23 = rn[3U];
   Lib_IntVector_Intrinsics_vec128 f24 = rn[4U];
-  rn_5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f201, (uint64_t)5U);
-  rn_5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f21, (uint64_t)5U);
-  rn_5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f22, (uint64_t)5U);
-  rn_5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f23, (uint64_t)5U);
-  rn_5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f24, (uint64_t)5U);
-}
-
-void Hacl_Poly1305_128_poly1305_update1(Lib_IntVector_Intrinsics_vec128 *ctx, uint8_t *text)
-{
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
-  uint64_t u0 = load64_le(text);
-  uint64_t lo = u0;
-  uint64_t u = load64_le(text + (uint32_t)8U);
-  uint64_t hi = u;
-  Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
-  Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
-  Lib_IntVector_Intrinsics_vec128
-  f010 =
-    Lib_IntVector_Intrinsics_vec128_and(f0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f110 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f20 =
-    Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-        (uint32_t)52U),
-      Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
-  Lib_IntVector_Intrinsics_vec128
-  f30 =
-    Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
-  Lib_IntVector_Intrinsics_vec128 f01 = f010;
-  Lib_IntVector_Intrinsics_vec128 f111 = f110;
-  Lib_IntVector_Intrinsics_vec128 f2 = f20;
-  Lib_IntVector_Intrinsics_vec128 f3 = f30;
-  Lib_IntVector_Intrinsics_vec128 f41 = f40;
-  e[0U] = f01;
-  e[1U] = f111;
-  e[2U] = f2;
-  e[3U] = f3;
-  e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
-  Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
-  Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
-  e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
-  Lib_IntVector_Intrinsics_vec128 *r = pre;
-  Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
-  Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
-  Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
-  Lib_IntVector_Intrinsics_vec128 r3 = r[3U];
-  Lib_IntVector_Intrinsics_vec128 r4 = r[4U];
-  Lib_IntVector_Intrinsics_vec128 r51 = r5[1U];
-  Lib_IntVector_Intrinsics_vec128 r52 = r5[2U];
-  Lib_IntVector_Intrinsics_vec128 r53 = r5[3U];
-  Lib_IntVector_Intrinsics_vec128 r54 = r5[4U];
-  Lib_IntVector_Intrinsics_vec128 f10 = e[0U];
-  Lib_IntVector_Intrinsics_vec128 f11 = e[1U];
-  Lib_IntVector_Intrinsics_vec128 f12 = e[2U];
-  Lib_IntVector_Intrinsics_vec128 f13 = e[3U];
-  Lib_IntVector_Intrinsics_vec128 f14 = e[4U];
-  Lib_IntVector_Intrinsics_vec128 a0 = acc[0U];
-  Lib_IntVector_Intrinsics_vec128 a1 = acc[1U];
-  Lib_IntVector_Intrinsics_vec128 a2 = acc[2U];
-  Lib_IntVector_Intrinsics_vec128 a3 = acc[3U];
-  Lib_IntVector_Intrinsics_vec128 a4 = acc[4U];
-  Lib_IntVector_Intrinsics_vec128 a01 = Lib_IntVector_Intrinsics_vec128_add64(a0, f10);
-  Lib_IntVector_Intrinsics_vec128 a11 = Lib_IntVector_Intrinsics_vec128_add64(a1, f11);
-  Lib_IntVector_Intrinsics_vec128 a21 = Lib_IntVector_Intrinsics_vec128_add64(a2, f12);
-  Lib_IntVector_Intrinsics_vec128 a31 = Lib_IntVector_Intrinsics_vec128_add64(a3, f13);
-  Lib_IntVector_Intrinsics_vec128 a41 = Lib_IntVector_Intrinsics_vec128_add64(a4, f14);
-  Lib_IntVector_Intrinsics_vec128 a02 = Lib_IntVector_Intrinsics_vec128_mul64(r0, a01);
-  Lib_IntVector_Intrinsics_vec128 a12 = Lib_IntVector_Intrinsics_vec128_mul64(r1, a01);
-  Lib_IntVector_Intrinsics_vec128 a22 = Lib_IntVector_Intrinsics_vec128_mul64(r2, a01);
-  Lib_IntVector_Intrinsics_vec128 a32 = Lib_IntVector_Intrinsics_vec128_mul64(r3, a01);
-  Lib_IntVector_Intrinsics_vec128 a42 = Lib_IntVector_Intrinsics_vec128_mul64(r4, a01);
-  Lib_IntVector_Intrinsics_vec128
-  a03 =
-    Lib_IntVector_Intrinsics_vec128_add64(a02,
-      Lib_IntVector_Intrinsics_vec128_mul64(r54, a11));
-  Lib_IntVector_Intrinsics_vec128
-  a13 =
-    Lib_IntVector_Intrinsics_vec128_add64(a12,
-      Lib_IntVector_Intrinsics_vec128_mul64(r0, a11));
-  Lib_IntVector_Intrinsics_vec128
-  a23 =
-    Lib_IntVector_Intrinsics_vec128_add64(a22,
-      Lib_IntVector_Intrinsics_vec128_mul64(r1, a11));
-  Lib_IntVector_Intrinsics_vec128
-  a33 =
-    Lib_IntVector_Intrinsics_vec128_add64(a32,
-      Lib_IntVector_Intrinsics_vec128_mul64(r2, a11));
-  Lib_IntVector_Intrinsics_vec128
-  a43 =
-    Lib_IntVector_Intrinsics_vec128_add64(a42,
-      Lib_IntVector_Intrinsics_vec128_mul64(r3, a11));
-  Lib_IntVector_Intrinsics_vec128
-  a04 =
-    Lib_IntVector_Intrinsics_vec128_add64(a03,
-      Lib_IntVector_Intrinsics_vec128_mul64(r53, a21));
-  Lib_IntVector_Intrinsics_vec128
-  a14 =
-    Lib_IntVector_Intrinsics_vec128_add64(a13,
-      Lib_IntVector_Intrinsics_vec128_mul64(r54, a21));
-  Lib_IntVector_Intrinsics_vec128
-  a24 =
-    Lib_IntVector_Intrinsics_vec128_add64(a23,
-      Lib_IntVector_Intrinsics_vec128_mul64(r0, a21));
-  Lib_IntVector_Intrinsics_vec128
-  a34 =
-    Lib_IntVector_Intrinsics_vec128_add64(a33,
-      Lib_IntVector_Intrinsics_vec128_mul64(r1, a21));
-  Lib_IntVector_Intrinsics_vec128
-  a44 =
-    Lib_IntVector_Intrinsics_vec128_add64(a43,
-      Lib_IntVector_Intrinsics_vec128_mul64(r2, a21));
-  Lib_IntVector_Intrinsics_vec128
-  a05 =
-    Lib_IntVector_Intrinsics_vec128_add64(a04,
-      Lib_IntVector_Intrinsics_vec128_mul64(r52, a31));
-  Lib_IntVector_Intrinsics_vec128
-  a15 =
-    Lib_IntVector_Intrinsics_vec128_add64(a14,
-      Lib_IntVector_Intrinsics_vec128_mul64(r53, a31));
-  Lib_IntVector_Intrinsics_vec128
-  a25 =
-    Lib_IntVector_Intrinsics_vec128_add64(a24,
-      Lib_IntVector_Intrinsics_vec128_mul64(r54, a31));
-  Lib_IntVector_Intrinsics_vec128
-  a35 =
-    Lib_IntVector_Intrinsics_vec128_add64(a34,
-      Lib_IntVector_Intrinsics_vec128_mul64(r0, a31));
-  Lib_IntVector_Intrinsics_vec128
-  a45 =
-    Lib_IntVector_Intrinsics_vec128_add64(a44,
-      Lib_IntVector_Intrinsics_vec128_mul64(r1, a31));
-  Lib_IntVector_Intrinsics_vec128
-  a06 =
-    Lib_IntVector_Intrinsics_vec128_add64(a05,
-      Lib_IntVector_Intrinsics_vec128_mul64(r51, a41));
-  Lib_IntVector_Intrinsics_vec128
-  a16 =
-    Lib_IntVector_Intrinsics_vec128_add64(a15,
-      Lib_IntVector_Intrinsics_vec128_mul64(r52, a41));
-  Lib_IntVector_Intrinsics_vec128
-  a26 =
-    Lib_IntVector_Intrinsics_vec128_add64(a25,
-      Lib_IntVector_Intrinsics_vec128_mul64(r53, a41));
-  Lib_IntVector_Intrinsics_vec128
-  a36 =
-    Lib_IntVector_Intrinsics_vec128_add64(a35,
-      Lib_IntVector_Intrinsics_vec128_mul64(r54, a41));
-  Lib_IntVector_Intrinsics_vec128
-  a46 =
-    Lib_IntVector_Intrinsics_vec128_add64(a45,
-      Lib_IntVector_Intrinsics_vec128_mul64(r0, a41));
-  Lib_IntVector_Intrinsics_vec128 t0 = a06;
-  Lib_IntVector_Intrinsics_vec128 t1 = a16;
-  Lib_IntVector_Intrinsics_vec128 t2 = a26;
-  Lib_IntVector_Intrinsics_vec128 t3 = a36;
-  Lib_IntVector_Intrinsics_vec128 t4 = a46;
-  Lib_IntVector_Intrinsics_vec128
-  mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t0, mask26);
-  Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
-  Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
-  Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec128
-  z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
-  Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
-  Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
-  Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
-  Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
-  Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec128
-  z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128
-  z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
-  Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
-  Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
-  Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec128
-  z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
-  Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
-  Lib_IntVector_Intrinsics_vec128 o0 = x02;
-  Lib_IntVector_Intrinsics_vec128 o1 = x12;
-  Lib_IntVector_Intrinsics_vec128 o2 = x21;
-  Lib_IntVector_Intrinsics_vec128 o3 = x32;
-  Lib_IntVector_Intrinsics_vec128 o4 = x42;
-  acc[0U] = o0;
-  acc[1U] = o1;
-  acc[2U] = o2;
-  acc[3U] = o3;
-  acc[4U] = o4;
+  rn_5[0U] = Lib_IntVector_Intrinsics_vec128_smul64(f201, 5ULL);
+  rn_5[1U] = Lib_IntVector_Intrinsics_vec128_smul64(f21, 5ULL);
+  rn_5[2U] = Lib_IntVector_Intrinsics_vec128_smul64(f22, 5ULL);
+  rn_5[3U] = Lib_IntVector_Intrinsics_vec128_smul64(f23, 5ULL);
+  rn_5[4U] = Lib_IntVector_Intrinsics_vec128_smul64(f24, 5ULL);
 }
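The `rn_5[i] = 5 * rn[i]` precomputation just above is the standard trick for arithmetic modulo p = 2^130 - 5: since 2^130 = p + 5, anything that overflows past 2^130 re-enters the low end multiplied by 5. A minimal scalar sketch of the corresponding carry step, under a hypothetical helper name and not part of the patch itself:

#include <stdint.h>

/* Carry propagation for a field element held as five 26-bit limbs.
 * The carry leaving the top limb is folded back into the lowest limb
 * after multiplying it by 5, because 2^130 = (2^130 - 5) + 5. */
static inline void poly1305_carry_26(uint64_t h[5])
{
  const uint64_t mask26 = 0x3ffffffULL;
  uint64_t c;
  c = h[0] >> 26U; h[0] &= mask26; h[1] += c;
  c = h[1] >> 26U; h[1] &= mask26; h[2] += c;
  c = h[2] >> 26U; h[2] &= mask26; h[3] += c;
  c = h[3] >> 26U; h[3] &= mask26; h[4] += c;
  c = h[4] >> 26U; h[4] &= mask26; h[0] += c * 5U;
}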
 
-void
-Hacl_Poly1305_128_poly1305_update(
-  Lib_IntVector_Intrinsics_vec128 *ctx,
-  uint32_t len,
-  uint8_t *text
-)
+static void poly1305_update(Lib_IntVector_Intrinsics_vec128 *ctx, uint32_t len, uint8_t *text)
 {
-  Lib_IntVector_Intrinsics_vec128 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec128 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  uint32_t sz_block = (uint32_t)32U;
+  uint32_t sz_block = 32U;
   uint32_t len0 = len / sz_block * sz_block;
   uint8_t *t0 = text;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)32U;
+    uint32_t bs = 32U;
     uint8_t *text0 = t0;
-    Hacl_Impl_Poly1305_Field32xN_128_load_acc2(acc, text0);
+    Hacl_MAC_Poly1305_Simd128_load_acc2(acc, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t0 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
       Lib_IntVector_Intrinsics_vec128 b1 = Lib_IntVector_Intrinsics_vec128_load64_le(block);
-      Lib_IntVector_Intrinsics_vec128
-      b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + (uint32_t)16U);
+      Lib_IntVector_Intrinsics_vec128 b2 = Lib_IntVector_Intrinsics_vec128_load64_le(block + 16U);
       Lib_IntVector_Intrinsics_vec128 lo = Lib_IntVector_Intrinsics_vec128_interleave_low64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       hi = Lib_IntVector_Intrinsics_vec128_interleave_high64(b1, b2);
       Lib_IntVector_Intrinsics_vec128
       f00 =
         Lib_IntVector_Intrinsics_vec128_and(lo,
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f15 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)26U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 26U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
       Lib_IntVector_Intrinsics_vec128
       f25 =
-        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo,
-            (uint32_t)52U),
+        Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(lo, 52U),
           Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(hi,
-              Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-            (uint32_t)12U));
+              Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+            12U));
       Lib_IntVector_Intrinsics_vec128
       f30 =
-        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi,
-            (uint32_t)14U),
-          Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-      Lib_IntVector_Intrinsics_vec128
-      f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, (uint32_t)40U);
+        Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 14U),
+          Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+      Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(hi, 40U);
       Lib_IntVector_Intrinsics_vec128 f0 = f00;
       Lib_IntVector_Intrinsics_vec128 f1 = f15;
       Lib_IntVector_Intrinsics_vec128 f2 = f25;
@@ -838,12 +579,12 @@ Hacl_Poly1305_128_poly1305_update(
       e[2U] = f2;
       e[3U] = f3;
       e[4U] = f41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
       Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec128 *rn = pre + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec128 *rn5 = pre + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec128 *rn = pre + 10U;
+      Lib_IntVector_Intrinsics_vec128 *rn5 = pre + 15U;
       Lib_IntVector_Intrinsics_vec128 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec128 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec128 r2 = rn[2U];
@@ -948,37 +689,28 @@ Hacl_Poly1305_128_poly1305_update(
       Lib_IntVector_Intrinsics_vec128 t2 = a24;
       Lib_IntVector_Intrinsics_vec128 t3 = a34;
       Lib_IntVector_Intrinsics_vec128 t4 = a44;
-      Lib_IntVector_Intrinsics_vec128
-      mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec128
-      z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec128
-      z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
       Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec128
-      z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec128
-      z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec128
-      z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec128 o00 = x02;
@@ -1012,45 +744,41 @@ Hacl_Poly1305_128_poly1305_update(
       acc[3U] = o3;
       acc[4U] = o4;
     }
-    Hacl_Impl_Poly1305_Field32xN_128_fmul_r2_normalize(acc, pre);
+    Hacl_MAC_Poly1305_Simd128_fmul_r2_normalize(acc, pre);
   }
   uint32_t len1 = len - len0;
   uint8_t *t1 = text + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t1 + i * (uint32_t)16U;
+    uint8_t *block = t1 + i * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -1061,12 +789,12 @@ Hacl_Poly1305_128_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
     Lib_IntVector_Intrinsics_vec128 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec128_or(f4, mask);
     Lib_IntVector_Intrinsics_vec128 *r = pre;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -1181,37 +909,28 @@ Hacl_Poly1305_128_poly1305_update(
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -1225,41 +944,37 @@ Hacl_Poly1305_128_poly1305_update(
     acc[3U] = o3;
     acc[4U] = o4;
   }
-  if (rem > (uint32_t)0U)
+  if (rem > 0U)
   {
-    uint8_t *last = t1 + nb * (uint32_t)16U;
+    uint8_t *last = t1 + nb * 16U;
     KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 e[5U] KRML_POST_ALIGN(16) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec128 f0 = Lib_IntVector_Intrinsics_vec128_load64(lo);
     Lib_IntVector_Intrinsics_vec128 f1 = Lib_IntVector_Intrinsics_vec128_load64(hi);
     Lib_IntVector_Intrinsics_vec128
     f010 =
       Lib_IntVector_Intrinsics_vec128_and(f0,
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f110 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec128
     f20 =
-      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec128_or(Lib_IntVector_Intrinsics_vec128_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec128_shift_left64(Lib_IntVector_Intrinsics_vec128_and(f1,
-            Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec128_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec128
     f30 =
-      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec128
-    f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec128 f40 = Lib_IntVector_Intrinsics_vec128_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec128 f01 = f010;
     Lib_IntVector_Intrinsics_vec128 f111 = f110;
     Lib_IntVector_Intrinsics_vec128 f2 = f20;
@@ -1270,12 +985,12 @@ Hacl_Poly1305_128_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem * 8U % 26U;
     Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_load64(b);
-    Lib_IntVector_Intrinsics_vec128 fi = e[rem * (uint32_t)8U / (uint32_t)26U];
-    e[rem * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec128 fi = e[rem * 8U / 26U];
+    e[rem * 8U / 26U] = Lib_IntVector_Intrinsics_vec128_or(fi, mask);
     Lib_IntVector_Intrinsics_vec128 *r = pre;
-    Lib_IntVector_Intrinsics_vec128 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec128 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec128 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec128 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec128 r2 = r[2U];
@@ -1390,37 +1105,28 @@ Hacl_Poly1305_128_poly1305_update(
     Lib_IntVector_Intrinsics_vec128 t2 = a26;
     Lib_IntVector_Intrinsics_vec128 t3 = a36;
     Lib_IntVector_Intrinsics_vec128 t4 = a46;
-    Lib_IntVector_Intrinsics_vec128
-    mask26 = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec128
-    z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 mask26 = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec128 z0 = Lib_IntVector_Intrinsics_vec128_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec128 z1 = Lib_IntVector_Intrinsics_vec128_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec128 x0 = Lib_IntVector_Intrinsics_vec128_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec128 x3 = Lib_IntVector_Intrinsics_vec128_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec128 x1 = Lib_IntVector_Intrinsics_vec128_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec128 x4 = Lib_IntVector_Intrinsics_vec128_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec128
-    z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec128 z01 = Lib_IntVector_Intrinsics_vec128_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec128 z11 = Lib_IntVector_Intrinsics_vec128_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec128 t = Lib_IntVector_Intrinsics_vec128_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec128 z12 = Lib_IntVector_Intrinsics_vec128_add64(z11, t);
     Lib_IntVector_Intrinsics_vec128 x11 = Lib_IntVector_Intrinsics_vec128_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec128 x41 = Lib_IntVector_Intrinsics_vec128_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec128 x2 = Lib_IntVector_Intrinsics_vec128_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec128 x01 = Lib_IntVector_Intrinsics_vec128_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec128
-    z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec128
-    z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z02 = Lib_IntVector_Intrinsics_vec128_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec128 z13 = Lib_IntVector_Intrinsics_vec128_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec128 x21 = Lib_IntVector_Intrinsics_vec128_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec128 x02 = Lib_IntVector_Intrinsics_vec128_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec128 x31 = Lib_IntVector_Intrinsics_vec128_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec128 x12 = Lib_IntVector_Intrinsics_vec128_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec128
-    z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec128 z03 = Lib_IntVector_Intrinsics_vec128_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec128 x32 = Lib_IntVector_Intrinsics_vec128_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec128 x42 = Lib_IntVector_Intrinsics_vec128_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec128 o0 = x02;
@@ -1438,14 +1144,14 @@ Hacl_Poly1305_128_poly1305_update(
 }
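The vectorized poly1305_update above packs each 16-byte block into five 26-bit limbs and, for the trailing partial block, sets the padding bit 2^(8*rem). A scalar sketch of both steps, with hypothetical helper names, assuming a little-endian host as the load64_le path does:

#include <stdint.h>
#include <string.h>

/* Scalar analogue of the limb split done lane-wise above: a 16-byte
 * little-endian block becomes five limbs of 26+26+26+26+24 bits. */
static void poly1305_split_26(uint64_t e[5], const uint8_t block[16])
{
  uint64_t lo, hi;
  memcpy(&lo, block, 8);      /* little-endian host assumed, like load64_le */
  memcpy(&hi, block + 8, 8);
  e[0] = lo & 0x3ffffffULL;
  e[1] = (lo >> 26) & 0x3ffffffULL;
  e[2] = (lo >> 52) | ((hi & 0x3fffULL) << 12);
  e[3] = (hi >> 14) & 0x3ffffffULL;
  e[4] = hi >> 40;
}

/* Padding of a partial block of `rem` bytes (1 <= rem <= 15): the bit
 * 2^(8*rem) lands in limb (8*rem)/26 at position (8*rem)%26, which is the
 * index arithmetic used above.  A full block instead sets 2^128, i.e.
 * bit 24 (0x1000000) of limb 4. */
static inline void poly1305_pad_partial(uint64_t e[5], uint32_t rem)
{
  uint32_t bit = 8U * rem;
  e[bit / 26U] |= 1ULL << (bit % 26U);
}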
 
 void
-Hacl_Poly1305_128_poly1305_finish(
+Hacl_MAC_Poly1305_Simd128_poly1305_finish(
   uint8_t *tag,
   uint8_t *key,
   Lib_IntVector_Intrinsics_vec128 *ctx
 )
 {
   Lib_IntVector_Intrinsics_vec128 *acc = ctx;
-  uint8_t *ks = key + (uint32_t)16U;
+  uint8_t *ks = key + 16U;
   Lib_IntVector_Intrinsics_vec128 f0 = acc[0U];
   Lib_IntVector_Intrinsics_vec128 f13 = acc[1U];
   Lib_IntVector_Intrinsics_vec128 f23 = acc[2U];
@@ -1456,41 +1162,36 @@ Hacl_Poly1305_128_poly1305_finish(
   Lib_IntVector_Intrinsics_vec128
   tmp00 =
     Lib_IntVector_Intrinsics_vec128_and(l0,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c00 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c00 = Lib_IntVector_Intrinsics_vec128_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec128 l1 = Lib_IntVector_Intrinsics_vec128_add64(f13, c00);
   Lib_IntVector_Intrinsics_vec128
   tmp10 =
     Lib_IntVector_Intrinsics_vec128_and(l1,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c10 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c10 = Lib_IntVector_Intrinsics_vec128_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec128 l2 = Lib_IntVector_Intrinsics_vec128_add64(f23, c10);
   Lib_IntVector_Intrinsics_vec128
   tmp20 =
     Lib_IntVector_Intrinsics_vec128_and(l2,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c20 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c20 = Lib_IntVector_Intrinsics_vec128_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec128 l3 = Lib_IntVector_Intrinsics_vec128_add64(f33, c20);
   Lib_IntVector_Intrinsics_vec128
   tmp30 =
     Lib_IntVector_Intrinsics_vec128_and(l3,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c30 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c30 = Lib_IntVector_Intrinsics_vec128_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec128 l4 = Lib_IntVector_Intrinsics_vec128_add64(f40, c30);
   Lib_IntVector_Intrinsics_vec128
   tmp40 =
     Lib_IntVector_Intrinsics_vec128_and(l4,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c40 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c40 = Lib_IntVector_Intrinsics_vec128_shift_right64(l4, 26U);
   Lib_IntVector_Intrinsics_vec128
   f010 =
     Lib_IntVector_Intrinsics_vec128_add64(tmp00,
-      Lib_IntVector_Intrinsics_vec128_smul64(c40, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec128_smul64(c40, 5ULL));
   Lib_IntVector_Intrinsics_vec128 f110 = tmp10;
   Lib_IntVector_Intrinsics_vec128 f210 = tmp20;
   Lib_IntVector_Intrinsics_vec128 f310 = tmp30;
@@ -1500,49 +1201,42 @@ Hacl_Poly1305_128_poly1305_finish(
   Lib_IntVector_Intrinsics_vec128
   tmp0 =
     Lib_IntVector_Intrinsics_vec128_and(l,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c0 = Lib_IntVector_Intrinsics_vec128_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec128 l5 = Lib_IntVector_Intrinsics_vec128_add64(f110, c0);
   Lib_IntVector_Intrinsics_vec128
   tmp1 =
     Lib_IntVector_Intrinsics_vec128_and(l5,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l5, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c1 = Lib_IntVector_Intrinsics_vec128_shift_right64(l5, 26U);
   Lib_IntVector_Intrinsics_vec128 l6 = Lib_IntVector_Intrinsics_vec128_add64(f210, c1);
   Lib_IntVector_Intrinsics_vec128
   tmp2 =
     Lib_IntVector_Intrinsics_vec128_and(l6,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l6, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c2 = Lib_IntVector_Intrinsics_vec128_shift_right64(l6, 26U);
   Lib_IntVector_Intrinsics_vec128 l7 = Lib_IntVector_Intrinsics_vec128_add64(f310, c2);
   Lib_IntVector_Intrinsics_vec128
   tmp3 =
     Lib_IntVector_Intrinsics_vec128_and(l7,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l7, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c3 = Lib_IntVector_Intrinsics_vec128_shift_right64(l7, 26U);
   Lib_IntVector_Intrinsics_vec128 l8 = Lib_IntVector_Intrinsics_vec128_add64(f410, c3);
   Lib_IntVector_Intrinsics_vec128
   tmp4 =
     Lib_IntVector_Intrinsics_vec128_and(l8,
-      Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec128
-  c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l8, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec128 c4 = Lib_IntVector_Intrinsics_vec128_shift_right64(l8, 26U);
   Lib_IntVector_Intrinsics_vec128
   f02 =
     Lib_IntVector_Intrinsics_vec128_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec128_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec128_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec128 f12 = tmp1;
   Lib_IntVector_Intrinsics_vec128 f22 = tmp2;
   Lib_IntVector_Intrinsics_vec128 f32 = tmp3;
   Lib_IntVector_Intrinsics_vec128 f42 = tmp4;
-  Lib_IntVector_Intrinsics_vec128
-  mh = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec128
-  ml = Lib_IntVector_Intrinsics_vec128_load64((uint64_t)0x3fffffbU);
+  Lib_IntVector_Intrinsics_vec128 mh = Lib_IntVector_Intrinsics_vec128_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec128 ml = Lib_IntVector_Intrinsics_vec128_load64(0x3fffffbULL);
   Lib_IntVector_Intrinsics_vec128 mask = Lib_IntVector_Intrinsics_vec128_eq64(f42, mh);
   Lib_IntVector_Intrinsics_vec128
   mask1 =
@@ -1582,36 +1276,334 @@ Hacl_Poly1305_128_poly1305_finish(
   Lib_IntVector_Intrinsics_vec128 f2 = acc[2U];
   Lib_IntVector_Intrinsics_vec128 f3 = acc[3U];
   Lib_IntVector_Intrinsics_vec128 f4 = acc[4U];
-  uint64_t f01 = Lib_IntVector_Intrinsics_vec128_extract64(f00, (uint32_t)0U);
-  uint64_t f112 = Lib_IntVector_Intrinsics_vec128_extract64(f1, (uint32_t)0U);
-  uint64_t f212 = Lib_IntVector_Intrinsics_vec128_extract64(f2, (uint32_t)0U);
-  uint64_t f312 = Lib_IntVector_Intrinsics_vec128_extract64(f3, (uint32_t)0U);
-  uint64_t f41 = Lib_IntVector_Intrinsics_vec128_extract64(f4, (uint32_t)0U);
-  uint64_t lo = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U;
-  uint64_t hi = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U;
+  uint64_t f01 = Lib_IntVector_Intrinsics_vec128_extract64(f00, 0U);
+  uint64_t f112 = Lib_IntVector_Intrinsics_vec128_extract64(f1, 0U);
+  uint64_t f212 = Lib_IntVector_Intrinsics_vec128_extract64(f2, 0U);
+  uint64_t f312 = Lib_IntVector_Intrinsics_vec128_extract64(f3, 0U);
+  uint64_t f41 = Lib_IntVector_Intrinsics_vec128_extract64(f4, 0U);
+  uint64_t lo = (f01 | f112 << 26U) | f212 << 52U;
+  uint64_t hi = (f212 >> 12U | f312 << 14U) | f41 << 40U;
   uint64_t f10 = lo;
   uint64_t f11 = hi;
   uint64_t u0 = load64_le(ks);
   uint64_t lo0 = u0;
-  uint64_t u = load64_le(ks + (uint32_t)8U);
+  uint64_t u = load64_le(ks + 8U);
   uint64_t hi0 = u;
   uint64_t f20 = lo0;
   uint64_t f21 = hi0;
   uint64_t r0 = f10 + f20;
   uint64_t r1 = f11 + f21;
-  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U;
+  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> 63U;
   uint64_t r11 = r1 + c;
   uint64_t f30 = r0;
   uint64_t f31 = r11;
   store64_le(tag, f30);
-  store64_le(tag + (uint32_t)8U, f31);
+  store64_le(tag + 8U, f31);
+}
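The finish routine above ends by adding the second half of the key (the pad s) to the 128-bit accumulator, computing the carry between the two 64-bit words without a data-dependent branch. A small sketch of that constant-time carry, under a hypothetical name:

#include <stdint.h>

/* Returns 1 exactly when a + b wraps modulo 2^64; same branch-free
 * expression as used above (carry iff (a + b) < b, computed bitwise). */
static inline uint64_t carry64(uint64_t a, uint64_t b)
{
  uint64_t r = a + b;
  return (r ^ ((r ^ b) | ((r - b) ^ b))) >> 63U;
}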
+
+Hacl_MAC_Poly1305_Simd128_state_t *Hacl_MAC_Poly1305_Simd128_malloc(uint8_t *key)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  Lib_IntVector_Intrinsics_vec128
+  *r1 =
+    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
+      sizeof (Lib_IntVector_Intrinsics_vec128) * 25U);
+  memset(r1, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Lib_IntVector_Intrinsics_vec128 *block_state = r1;
+  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  memcpy(k_, key, 32U * sizeof (uint8_t));
+  uint8_t *k_0 = k_;
+  Hacl_MAC_Poly1305_Simd128_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_0 };
+  Hacl_MAC_Poly1305_Simd128_state_t
+  *p =
+    (Hacl_MAC_Poly1305_Simd128_state_t *)KRML_HOST_MALLOC(sizeof (
+        Hacl_MAC_Poly1305_Simd128_state_t
+      ));
+  p[0U] = s;
+  Hacl_MAC_Poly1305_Simd128_poly1305_init(block_state, key);
+  return p;
+}
+
+void Hacl_MAC_Poly1305_Simd128_reset(Hacl_MAC_Poly1305_Simd128_state_t *state, uint8_t *key)
+{
+  Hacl_MAC_Poly1305_Simd128_state_t scrut = *state;
+  uint8_t *k_ = scrut.p_key;
+  uint8_t *buf = scrut.buf;
+  Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
+  Hacl_MAC_Poly1305_Simd128_poly1305_init(block_state, key);
+  memcpy(k_, key, 32U * sizeof (uint8_t));
+  uint8_t *k_1 = k_;
+  Hacl_MAC_Poly1305_Simd128_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 };
+  state[0U] = tmp;
 }
 
-void Hacl_Poly1305_128_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key)
+/**
+0 = success, 1 = max length exceeded
+*/
+Hacl_Streaming_Types_error_code
+Hacl_MAC_Poly1305_Simd128_update(
+  Hacl_MAC_Poly1305_Simd128_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
+)
+{
+  Hacl_MAC_Poly1305_Simd128_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)32U == 0ULL && total_len > 0ULL)
+  {
+    sz = 32U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)32U);
+  }
+  if (chunk_len <= 32U - sz)
+  {
+    Hacl_MAC_Poly1305_Simd128_state_t s1 = *state;
+    Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 32U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)32U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd128_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2,
+          .p_key = k_1
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_MAC_Poly1305_Simd128_state_t s1 = *state;
+    Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 32U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)32U);
+    }
+    if (!(sz1 == 0U))
+    {
+      poly1305_update(block_state1, 32U, buf);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)32U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 32U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)32U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 32U;
+    uint32_t data1_len = n_blocks * 32U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    poly1305_update(block_state1, data1_len, data1);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd128_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len,
+          .p_key = k_1
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 32U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_MAC_Poly1305_Simd128_state_t s1 = *state;
+    Lib_IntVector_Intrinsics_vec128 *block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)32U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 32U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)32U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd128_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2,
+          .p_key = k_1
+        }
+      );
+    Hacl_MAC_Poly1305_Simd128_state_t s10 = *state;
+    Lib_IntVector_Intrinsics_vec128 *block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint8_t *k_10 = s10.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)32U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 32U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)32U);
+    }
+    if (!(sz1 == 0U))
+    {
+      poly1305_update(block_state1, 32U, buf);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)32U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 32U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)32U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 32U;
+    uint32_t data1_len = n_blocks * 32U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    poly1305_update(block_state1, data1_len, data1);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd128_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff),
+          .p_key = k_10
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
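The streaming update above keeps at most one 32-byte double block (two 16-byte Poly1305 blocks, matching the two-lane vec128 path) in the internal buffer, flushing it lazily on a later call or at digest time, and it returns MaximumLengthExceeded once the total input would pass 2^32 - 1 bytes. Its repeated fill-level computation can be summarized by this small sketch (hypothetical name, illustrative only):

#include <stdint.h>

/* How full the 32-byte staging buffer is after `total_len` bytes: an exact
 * multiple of 32 keeps one full buffered block around, otherwise the
 * remainder modulo 32 is what sits in the buffer. */
static inline uint32_t poly1305_simd128_fill(uint64_t total_len)
{
  if (total_len % 32U == 0U && total_len > 0U)
    return 32U;
  return (uint32_t)(total_len % 32U);
}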
+
+void
+Hacl_MAC_Poly1305_Simd128_digest(Hacl_MAC_Poly1305_Simd128_state_t *state, uint8_t *output)
+{
+  Hacl_MAC_Poly1305_Simd128_state_t scrut = *state;
+  Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint8_t *k_ = scrut.p_key;
+  uint32_t r;
+  if (total_len % (uint64_t)32U == 0ULL && total_len > 0ULL)
+  {
+    r = 32U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)32U);
+  }
+  uint8_t *buf_1 = buf_;
+  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 r1[25U] KRML_POST_ALIGN(16) = { 0U };
+  Lib_IntVector_Intrinsics_vec128 *tmp_block_state = r1;
+  memcpy(tmp_block_state, block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  uint32_t ite0;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite0 = 16U;
+  }
+  else
+  {
+    ite0 = r % 16U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite0;
+  uint8_t *buf_multi = buf_1;
+  uint32_t ite;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite = 16U;
+  }
+  else
+  {
+    ite = r % 16U;
+  }
+  poly1305_update(tmp_block_state, r - ite, buf_multi);
+  uint32_t ite1;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite1 = 16U;
+  }
+  else
+  {
+    ite1 = r % 16U;
+  }
+  poly1305_update(tmp_block_state, ite1, buf_last);
+  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 tmp[25U] KRML_POST_ALIGN(16) = { 0U };
+  memcpy(tmp, tmp_block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  Hacl_MAC_Poly1305_Simd128_poly1305_finish(output, k_, tmp);
+}
+
+void Hacl_MAC_Poly1305_Simd128_free(Hacl_MAC_Poly1305_Simd128_state_t *state)
+{
+  Hacl_MAC_Poly1305_Simd128_state_t scrut = *state;
+  uint8_t *k_ = scrut.p_key;
+  uint8_t *buf = scrut.buf;
+  Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
+  KRML_HOST_FREE(k_);
+  KRML_ALIGNED_FREE(block_state);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
+
+void
+Hacl_MAC_Poly1305_Simd128_mac(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key
+)
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ctx[25U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Poly1305_128_poly1305_init(ctx, key);
-  Hacl_Poly1305_128_poly1305_update(ctx, len, text);
-  Hacl_Poly1305_128_poly1305_finish(tag, key, ctx);
+  Hacl_MAC_Poly1305_Simd128_poly1305_init(ctx, key);
+  poly1305_update(ctx, input_len, input);
+  Hacl_MAC_Poly1305_Simd128_poly1305_finish(output, key, ctx);
 }
 
diff --git a/src/Hacl_Poly1305_256.c b/src/msvc/Hacl_MAC_Poly1305_Simd256.c
similarity index 71%
rename from src/Hacl_Poly1305_256.c
rename to src/msvc/Hacl_MAC_Poly1305_Simd256.c
index db28cdc7..f25e8fff 100644
--- a/src/Hacl_Poly1305_256.c
+++ b/src/msvc/Hacl_MAC_Poly1305_Simd256.c
@@ -23,39 +23,30 @@
  */
 
 
-#include "internal/Hacl_Poly1305_256.h"
+#include "internal/Hacl_MAC_Poly1305_Simd256.h"
 
-void
-Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc, uint8_t *b)
+void Hacl_MAC_Poly1305_Simd256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc, uint8_t *b)
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
   Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(b);
-  Lib_IntVector_Intrinsics_vec256
-  hi = Lib_IntVector_Intrinsics_vec256_load64_le(b + (uint32_t)32U);
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
+  Lib_IntVector_Intrinsics_vec256 hi = Lib_IntVector_Intrinsics_vec256_load64_le(b + 32U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
   Lib_IntVector_Intrinsics_vec256 m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
   Lib_IntVector_Intrinsics_vec256
   m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi);
-  Lib_IntVector_Intrinsics_vec256
-  m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U);
-  Lib_IntVector_Intrinsics_vec256
-  m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U);
+  Lib_IntVector_Intrinsics_vec256 m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, 48U);
+  Lib_IntVector_Intrinsics_vec256 m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, 48U);
   Lib_IntVector_Intrinsics_vec256 m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1);
   Lib_IntVector_Intrinsics_vec256 t0 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1);
   Lib_IntVector_Intrinsics_vec256 t3 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
-  Lib_IntVector_Intrinsics_vec256
-  t2 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)4U);
+  Lib_IntVector_Intrinsics_vec256 t2 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 4U);
   Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t2, mask26);
-  Lib_IntVector_Intrinsics_vec256
-  t1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 t1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
   Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t1, mask26);
   Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
-  Lib_IntVector_Intrinsics_vec256
-  t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)30U);
+  Lib_IntVector_Intrinsics_vec256 t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 30U);
   Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask26);
-  Lib_IntVector_Intrinsics_vec256
-  o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
+  Lib_IntVector_Intrinsics_vec256 o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, 40U);
   Lib_IntVector_Intrinsics_vec256 o0 = o5;
   Lib_IntVector_Intrinsics_vec256 o1 = o10;
   Lib_IntVector_Intrinsics_vec256 o2 = o20;
@@ -66,7 +57,7 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
   e[2U] = o2;
   e[3U] = o3;
   e[4U] = o4;
-  uint64_t b1 = (uint64_t)0x1000000U;
+  uint64_t b1 = 0x1000000ULL;
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b1);
   Lib_IntVector_Intrinsics_vec256 f40 = e[4U];
   e[4U] = Lib_IntVector_Intrinsics_vec256_or(f40, mask);
@@ -88,28 +79,28 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
   Lib_IntVector_Intrinsics_vec256
   r01 =
     Lib_IntVector_Intrinsics_vec256_insert64(r0,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc0, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc0, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r11 =
     Lib_IntVector_Intrinsics_vec256_insert64(r1,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc1, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc1, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r21 =
     Lib_IntVector_Intrinsics_vec256_insert64(r2,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc2, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc2, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r31 =
     Lib_IntVector_Intrinsics_vec256_insert64(r3,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc3, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc3, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256
   r41 =
     Lib_IntVector_Intrinsics_vec256_insert64(r4,
-      Lib_IntVector_Intrinsics_vec256_extract64(acc4, (uint32_t)0U),
-      (uint32_t)0U);
+      Lib_IntVector_Intrinsics_vec256_extract64(acc4, 0U),
+      0U);
   Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_add64(r01, e0);
   Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_add64(r11, e1);
   Lib_IntVector_Intrinsics_vec256 f2 = Lib_IntVector_Intrinsics_vec256_add64(r21, e2);
@@ -128,14 +119,14 @@ Hacl_Impl_Poly1305_Field32xN_256_load_acc4(Lib_IntVector_Intrinsics_vec256 *acc,
 }
 
 void
-Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
+Hacl_MAC_Poly1305_Simd256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 *out,
   Lib_IntVector_Intrinsics_vec256 *p
 )
 {
   Lib_IntVector_Intrinsics_vec256 *r = p;
-  Lib_IntVector_Intrinsics_vec256 *r_5 = p + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec256 *r4 = p + (uint32_t)10U;
+  Lib_IntVector_Intrinsics_vec256 *r_5 = p + 5U;
+  Lib_IntVector_Intrinsics_vec256 *r4 = p + 10U;
   Lib_IntVector_Intrinsics_vec256 a0 = out[0U];
   Lib_IntVector_Intrinsics_vec256 a1 = out[1U];
   Lib_IntVector_Intrinsics_vec256 a2 = out[2U];
@@ -245,37 +236,30 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 t20 = a250;
   Lib_IntVector_Intrinsics_vec256 t30 = a350;
   Lib_IntVector_Intrinsics_vec256 t40 = a450;
-  Lib_IntVector_Intrinsics_vec256
-  mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, 26U);
+  Lib_IntVector_Intrinsics_vec256 z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 26U);
   Lib_IntVector_Intrinsics_vec256 x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask260);
   Lib_IntVector_Intrinsics_vec256 x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask260);
   Lib_IntVector_Intrinsics_vec256 x10 = Lib_IntVector_Intrinsics_vec256_add64(t10, z00);
   Lib_IntVector_Intrinsics_vec256 x40 = Lib_IntVector_Intrinsics_vec256_add64(t40, z10);
-  Lib_IntVector_Intrinsics_vec256
-  z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, 26U);
+  Lib_IntVector_Intrinsics_vec256 z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, 26U);
+  Lib_IntVector_Intrinsics_vec256 t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z110, t5);
   Lib_IntVector_Intrinsics_vec256 x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask260);
   Lib_IntVector_Intrinsics_vec256 x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask260);
   Lib_IntVector_Intrinsics_vec256 x20 = Lib_IntVector_Intrinsics_vec256_add64(t20, z010);
   Lib_IntVector_Intrinsics_vec256 x010 = Lib_IntVector_Intrinsics_vec256_add64(x00, z12);
+  Lib_IntVector_Intrinsics_vec256 z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, 26U);
   Lib_IntVector_Intrinsics_vec256
-  z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, (uint32_t)26U);
+  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, 26U);
   Lib_IntVector_Intrinsics_vec256 x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask260);
   Lib_IntVector_Intrinsics_vec256 x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask260);
   Lib_IntVector_Intrinsics_vec256 x310 = Lib_IntVector_Intrinsics_vec256_add64(x30, z020);
   Lib_IntVector_Intrinsics_vec256 x120 = Lib_IntVector_Intrinsics_vec256_add64(x110, z130);
   Lib_IntVector_Intrinsics_vec256
-  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, (uint32_t)26U);
+  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, 26U);
   Lib_IntVector_Intrinsics_vec256 x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask260);
   Lib_IntVector_Intrinsics_vec256 x420 = Lib_IntVector_Intrinsics_vec256_add64(x410, z030);
   Lib_IntVector_Intrinsics_vec256 r20 = x020;
@@ -373,37 +357,30 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 t21 = a251;
   Lib_IntVector_Intrinsics_vec256 t31 = a351;
   Lib_IntVector_Intrinsics_vec256 t41 = a451;
-  Lib_IntVector_Intrinsics_vec256
-  mask261 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z04 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z14 = Lib_IntVector_Intrinsics_vec256_shift_right64(t31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask261 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z04 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+  Lib_IntVector_Intrinsics_vec256 z14 = Lib_IntVector_Intrinsics_vec256_shift_right64(t31, 26U);
   Lib_IntVector_Intrinsics_vec256 x03 = Lib_IntVector_Intrinsics_vec256_and(t01, mask261);
   Lib_IntVector_Intrinsics_vec256 x33 = Lib_IntVector_Intrinsics_vec256_and(t31, mask261);
   Lib_IntVector_Intrinsics_vec256 x13 = Lib_IntVector_Intrinsics_vec256_add64(t11, z04);
   Lib_IntVector_Intrinsics_vec256 x43 = Lib_IntVector_Intrinsics_vec256_add64(t41, z14);
-  Lib_IntVector_Intrinsics_vec256
-  z011 = Lib_IntVector_Intrinsics_vec256_shift_right64(x13, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z111 = Lib_IntVector_Intrinsics_vec256_shift_right64(x43, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t6 = Lib_IntVector_Intrinsics_vec256_shift_left64(z111, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z011 = Lib_IntVector_Intrinsics_vec256_shift_right64(x13, 26U);
+  Lib_IntVector_Intrinsics_vec256 z111 = Lib_IntVector_Intrinsics_vec256_shift_right64(x43, 26U);
+  Lib_IntVector_Intrinsics_vec256 t6 = Lib_IntVector_Intrinsics_vec256_shift_left64(z111, 2U);
   Lib_IntVector_Intrinsics_vec256 z120 = Lib_IntVector_Intrinsics_vec256_add64(z111, t6);
   Lib_IntVector_Intrinsics_vec256 x111 = Lib_IntVector_Intrinsics_vec256_and(x13, mask261);
   Lib_IntVector_Intrinsics_vec256 x411 = Lib_IntVector_Intrinsics_vec256_and(x43, mask261);
   Lib_IntVector_Intrinsics_vec256 x22 = Lib_IntVector_Intrinsics_vec256_add64(t21, z011);
   Lib_IntVector_Intrinsics_vec256 x011 = Lib_IntVector_Intrinsics_vec256_add64(x03, z120);
+  Lib_IntVector_Intrinsics_vec256 z021 = Lib_IntVector_Intrinsics_vec256_shift_right64(x22, 26U);
   Lib_IntVector_Intrinsics_vec256
-  z021 = Lib_IntVector_Intrinsics_vec256_shift_right64(x22, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z131 = Lib_IntVector_Intrinsics_vec256_shift_right64(x011, (uint32_t)26U);
+  z131 = Lib_IntVector_Intrinsics_vec256_shift_right64(x011, 26U);
   Lib_IntVector_Intrinsics_vec256 x211 = Lib_IntVector_Intrinsics_vec256_and(x22, mask261);
   Lib_IntVector_Intrinsics_vec256 x021 = Lib_IntVector_Intrinsics_vec256_and(x011, mask261);
   Lib_IntVector_Intrinsics_vec256 x311 = Lib_IntVector_Intrinsics_vec256_add64(x33, z021);
   Lib_IntVector_Intrinsics_vec256 x121 = Lib_IntVector_Intrinsics_vec256_add64(x111, z131);
   Lib_IntVector_Intrinsics_vec256
-  z031 = Lib_IntVector_Intrinsics_vec256_shift_right64(x311, (uint32_t)26U);
+  z031 = Lib_IntVector_Intrinsics_vec256_shift_right64(x311, 26U);
   Lib_IntVector_Intrinsics_vec256 x321 = Lib_IntVector_Intrinsics_vec256_and(x311, mask261);
   Lib_IntVector_Intrinsics_vec256 x421 = Lib_IntVector_Intrinsics_vec256_add64(x411, z031);
   Lib_IntVector_Intrinsics_vec256 r30 = x021;
@@ -441,14 +418,10 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   v34344 = Lib_IntVector_Intrinsics_vec256_interleave_low64(r44, r34);
   Lib_IntVector_Intrinsics_vec256
   r12344 = Lib_IntVector_Intrinsics_vec256_interleave_low128(v34344, v12124);
-  Lib_IntVector_Intrinsics_vec256
-  r123451 = Lib_IntVector_Intrinsics_vec256_smul64(r12341, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec256
-  r123452 = Lib_IntVector_Intrinsics_vec256_smul64(r12342, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec256
-  r123453 = Lib_IntVector_Intrinsics_vec256_smul64(r12343, (uint64_t)5U);
-  Lib_IntVector_Intrinsics_vec256
-  r123454 = Lib_IntVector_Intrinsics_vec256_smul64(r12344, (uint64_t)5U);
+  Lib_IntVector_Intrinsics_vec256 r123451 = Lib_IntVector_Intrinsics_vec256_smul64(r12341, 5ULL);
+  Lib_IntVector_Intrinsics_vec256 r123452 = Lib_IntVector_Intrinsics_vec256_smul64(r12342, 5ULL);
+  Lib_IntVector_Intrinsics_vec256 r123453 = Lib_IntVector_Intrinsics_vec256_smul64(r12343, 5ULL);
+  Lib_IntVector_Intrinsics_vec256 r123454 = Lib_IntVector_Intrinsics_vec256_smul64(r12344, 5ULL);
   Lib_IntVector_Intrinsics_vec256 a01 = Lib_IntVector_Intrinsics_vec256_mul64(r12340, a0);
   Lib_IntVector_Intrinsics_vec256 a11 = Lib_IntVector_Intrinsics_vec256_mul64(r12341, a0);
   Lib_IntVector_Intrinsics_vec256 a21 = Lib_IntVector_Intrinsics_vec256_mul64(r12342, a0);
@@ -539,37 +512,28 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256 t2 = a25;
   Lib_IntVector_Intrinsics_vec256 t3 = a35;
   Lib_IntVector_Intrinsics_vec256 t4 = a45;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z121 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z121);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -612,41 +576,36 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   Lib_IntVector_Intrinsics_vec256
   tmp0 =
     Lib_IntVector_Intrinsics_vec256_and(l,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec256 l0 = Lib_IntVector_Intrinsics_vec256_add64(v21, c0);
   Lib_IntVector_Intrinsics_vec256
   tmp1 =
     Lib_IntVector_Intrinsics_vec256_and(l0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec256 l1 = Lib_IntVector_Intrinsics_vec256_add64(v22, c1);
   Lib_IntVector_Intrinsics_vec256
   tmp2 =
     Lib_IntVector_Intrinsics_vec256_and(l1,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec256 l2 = Lib_IntVector_Intrinsics_vec256_add64(v23, c2);
   Lib_IntVector_Intrinsics_vec256
   tmp3 =
     Lib_IntVector_Intrinsics_vec256_and(l2,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec256 l3 = Lib_IntVector_Intrinsics_vec256_add64(v24, c3);
   Lib_IntVector_Intrinsics_vec256
   tmp4 =
     Lib_IntVector_Intrinsics_vec256_and(l3,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec256
   o00 =
     Lib_IntVector_Intrinsics_vec256_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec256_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec256_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec256 o1 = tmp1;
   Lib_IntVector_Intrinsics_vec256 o2 = tmp2;
   Lib_IntVector_Intrinsics_vec256 o3 = tmp3;
@@ -658,10 +617,11 @@ Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(
   out[4U] = o4;
 }
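The carry chains above keep each Poly1305 field element in five 26-bit limbs (radix 2^26), one element per 64-bit lane of a vec256: every limb is masked with 0x3ffffff, the bits shifted out feed the next limb, and the carry leaving the top limb is multiplied by 5 and folded back into limb 0, since 2^130 is congruent to 5 modulo p = 2^130 - 5. The scalar sketch below shows one such sequential carry pass under a hypothetical helper name; the generated code schedules the carries in a staggered order to keep the vector lanes busy, but the result is equivalent modulo p.

#include <stdint.h>

/* One carry-propagation pass over five 26-bit limbs of a Poly1305
 * field element, partially reducing it modulo p = 2^130 - 5.
 * Scalar sketch only: the generated code performs the same arithmetic
 * on four field elements at once, one per AVX2 lane. */
static void carry_reduce_26(uint64_t f[5])
{
  uint64_t mask26 = 0x3ffffffULL;
  uint64_t c = 0ULL;
  for (int i = 0; i < 5; i++)
  {
    f[i] += c;
    c = f[i] >> 26U;   /* carry out of the current limb */
    f[i] &= mask26;    /* keep the low 26 bits          */
  }
  f[0] += c * 5ULL;    /* 2^130 == 5 (mod p): fold the top carry back in */
}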
 
-void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *key)
+void
+Hacl_MAC_Poly1305_Simd256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *key)
 {
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   uint8_t *kr = key;
   acc[0U] = Lib_IntVector_Intrinsics_vec256_zero;
   acc[1U] = Lib_IntVector_Intrinsics_vec256_zero;
@@ -670,41 +630,38 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   acc[4U] = Lib_IntVector_Intrinsics_vec256_zero;
   uint64_t u0 = load64_le(kr);
   uint64_t lo = u0;
-  uint64_t u = load64_le(kr + (uint32_t)8U);
+  uint64_t u = load64_le(kr + 8U);
   uint64_t hi = u;
-  uint64_t mask0 = (uint64_t)0x0ffffffc0fffffffU;
-  uint64_t mask1 = (uint64_t)0x0ffffffc0ffffffcU;
+  uint64_t mask0 = 0x0ffffffc0fffffffULL;
+  uint64_t mask1 = 0x0ffffffc0ffffffcULL;
   uint64_t lo1 = lo & mask0;
   uint64_t hi1 = hi & mask1;
   Lib_IntVector_Intrinsics_vec256 *r = pre;
-  Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec256 *rn = pre + (uint32_t)10U;
-  Lib_IntVector_Intrinsics_vec256 *rn_5 = pre + (uint32_t)15U;
+  Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
+  Lib_IntVector_Intrinsics_vec256 *rn = pre + 10U;
+  Lib_IntVector_Intrinsics_vec256 *rn_5 = pre + 15U;
   Lib_IntVector_Intrinsics_vec256 r_vec0 = Lib_IntVector_Intrinsics_vec256_load64(lo1);
   Lib_IntVector_Intrinsics_vec256 r_vec1 = Lib_IntVector_Intrinsics_vec256_load64(hi1);
   Lib_IntVector_Intrinsics_vec256
   f00 =
     Lib_IntVector_Intrinsics_vec256_and(r_vec0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f15 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0, 26U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
   f20 =
-    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0,
-        (uint32_t)52U),
+    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec0, 52U),
       Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(r_vec1,
-          Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
+          Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+        12U));
   Lib_IntVector_Intrinsics_vec256
   f30 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, 14U),
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
   Lib_IntVector_Intrinsics_vec256
-  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, (uint32_t)40U);
+  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(r_vec1, 40U);
   Lib_IntVector_Intrinsics_vec256 f0 = f00;
   Lib_IntVector_Intrinsics_vec256 f1 = f15;
   Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -720,11 +677,11 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 f220 = r[2U];
   Lib_IntVector_Intrinsics_vec256 f230 = r[3U];
   Lib_IntVector_Intrinsics_vec256 f240 = r[4U];
-  r5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f200, (uint64_t)5U);
-  r5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f210, (uint64_t)5U);
-  r5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f220, (uint64_t)5U);
-  r5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f230, (uint64_t)5U);
-  r5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f240, (uint64_t)5U);
+  r5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f200, 5ULL);
+  r5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f210, 5ULL);
+  r5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f220, 5ULL);
+  r5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f230, 5ULL);
+  r5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f240, 5ULL);
   Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
   Lib_IntVector_Intrinsics_vec256 r10 = r[1U];
   Lib_IntVector_Intrinsics_vec256 r20 = r[2U];
@@ -829,37 +786,30 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 t20 = a240;
   Lib_IntVector_Intrinsics_vec256 t30 = a340;
   Lib_IntVector_Intrinsics_vec256 t40 = a440;
-  Lib_IntVector_Intrinsics_vec256
-  mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z00 = Lib_IntVector_Intrinsics_vec256_shift_right64(t00, 26U);
+  Lib_IntVector_Intrinsics_vec256 z10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 26U);
   Lib_IntVector_Intrinsics_vec256 x00 = Lib_IntVector_Intrinsics_vec256_and(t00, mask260);
   Lib_IntVector_Intrinsics_vec256 x30 = Lib_IntVector_Intrinsics_vec256_and(t30, mask260);
   Lib_IntVector_Intrinsics_vec256 x10 = Lib_IntVector_Intrinsics_vec256_add64(t10, z00);
   Lib_IntVector_Intrinsics_vec256 x40 = Lib_IntVector_Intrinsics_vec256_add64(t40, z10);
-  Lib_IntVector_Intrinsics_vec256
-  z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z010 = Lib_IntVector_Intrinsics_vec256_shift_right64(x10, 26U);
+  Lib_IntVector_Intrinsics_vec256 z110 = Lib_IntVector_Intrinsics_vec256_shift_right64(x40, 26U);
+  Lib_IntVector_Intrinsics_vec256 t5 = Lib_IntVector_Intrinsics_vec256_shift_left64(z110, 2U);
   Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z110, t5);
   Lib_IntVector_Intrinsics_vec256 x110 = Lib_IntVector_Intrinsics_vec256_and(x10, mask260);
   Lib_IntVector_Intrinsics_vec256 x410 = Lib_IntVector_Intrinsics_vec256_and(x40, mask260);
   Lib_IntVector_Intrinsics_vec256 x20 = Lib_IntVector_Intrinsics_vec256_add64(t20, z010);
   Lib_IntVector_Intrinsics_vec256 x010 = Lib_IntVector_Intrinsics_vec256_add64(x00, z12);
+  Lib_IntVector_Intrinsics_vec256 z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, 26U);
   Lib_IntVector_Intrinsics_vec256
-  z020 = Lib_IntVector_Intrinsics_vec256_shift_right64(x20, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, (uint32_t)26U);
+  z130 = Lib_IntVector_Intrinsics_vec256_shift_right64(x010, 26U);
   Lib_IntVector_Intrinsics_vec256 x210 = Lib_IntVector_Intrinsics_vec256_and(x20, mask260);
   Lib_IntVector_Intrinsics_vec256 x020 = Lib_IntVector_Intrinsics_vec256_and(x010, mask260);
   Lib_IntVector_Intrinsics_vec256 x310 = Lib_IntVector_Intrinsics_vec256_add64(x30, z020);
   Lib_IntVector_Intrinsics_vec256 x120 = Lib_IntVector_Intrinsics_vec256_add64(x110, z130);
   Lib_IntVector_Intrinsics_vec256
-  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, (uint32_t)26U);
+  z030 = Lib_IntVector_Intrinsics_vec256_shift_right64(x310, 26U);
   Lib_IntVector_Intrinsics_vec256 x320 = Lib_IntVector_Intrinsics_vec256_and(x310, mask260);
   Lib_IntVector_Intrinsics_vec256 x420 = Lib_IntVector_Intrinsics_vec256_add64(x410, z030);
   Lib_IntVector_Intrinsics_vec256 o00 = x020;
@@ -877,11 +827,11 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 f221 = rn[2U];
   Lib_IntVector_Intrinsics_vec256 f231 = rn[3U];
   Lib_IntVector_Intrinsics_vec256 f241 = rn[4U];
-  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f201, (uint64_t)5U);
-  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f211, (uint64_t)5U);
-  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f221, (uint64_t)5U);
-  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f231, (uint64_t)5U);
-  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f241, (uint64_t)5U);
+  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f201, 5ULL);
+  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f211, 5ULL);
+  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f221, 5ULL);
+  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f231, 5ULL);
+  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f241, 5ULL);
   Lib_IntVector_Intrinsics_vec256 r00 = rn[0U];
   Lib_IntVector_Intrinsics_vec256 r1 = rn[1U];
   Lib_IntVector_Intrinsics_vec256 r2 = rn[2U];
@@ -980,37 +930,28 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 t2 = a24;
   Lib_IntVector_Intrinsics_vec256 t3 = a34;
   Lib_IntVector_Intrinsics_vec256 t4 = a44;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, 26U);
+  Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
   Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
   Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
   Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
   Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+  Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+  Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+  Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
   Lib_IntVector_Intrinsics_vec256 z120 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
   Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
   Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
   Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
   Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z120);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+  Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
   Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
   Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
   Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
   Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+  Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
   Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
   Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
   Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1028,277 +969,57 @@ void Hacl_Poly1305_256_poly1305_init(Lib_IntVector_Intrinsics_vec256 *ctx, uint8
   Lib_IntVector_Intrinsics_vec256 f22 = rn[2U];
   Lib_IntVector_Intrinsics_vec256 f23 = rn[3U];
   Lib_IntVector_Intrinsics_vec256 f24 = rn[4U];
-  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f202, (uint64_t)5U);
-  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f21, (uint64_t)5U);
-  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f22, (uint64_t)5U);
-  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f23, (uint64_t)5U);
-  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f24, (uint64_t)5U);
-}
-
-void Hacl_Poly1305_256_poly1305_update1(Lib_IntVector_Intrinsics_vec256 *ctx, uint8_t *text)
-{
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
-  uint64_t u0 = load64_le(text);
-  uint64_t lo = u0;
-  uint64_t u = load64_le(text + (uint32_t)8U);
-  uint64_t hi = u;
-  Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
-  Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
-  Lib_IntVector_Intrinsics_vec256
-  f010 =
-    Lib_IntVector_Intrinsics_vec256_and(f0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  f110 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)26U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  f20 =
-    Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-        (uint32_t)52U),
-      Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-          Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-        (uint32_t)12U));
-  Lib_IntVector_Intrinsics_vec256
-  f30 =
-    Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-        (uint32_t)14U),
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
-  Lib_IntVector_Intrinsics_vec256 f01 = f010;
-  Lib_IntVector_Intrinsics_vec256 f111 = f110;
-  Lib_IntVector_Intrinsics_vec256 f2 = f20;
-  Lib_IntVector_Intrinsics_vec256 f3 = f30;
-  Lib_IntVector_Intrinsics_vec256 f41 = f40;
-  e[0U] = f01;
-  e[1U] = f111;
-  e[2U] = f2;
-  e[3U] = f3;
-  e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
-  Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
-  Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
-  e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
-  Lib_IntVector_Intrinsics_vec256 *r = pre;
-  Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
-  Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
-  Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
-  Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
-  Lib_IntVector_Intrinsics_vec256 r3 = r[3U];
-  Lib_IntVector_Intrinsics_vec256 r4 = r[4U];
-  Lib_IntVector_Intrinsics_vec256 r51 = r5[1U];
-  Lib_IntVector_Intrinsics_vec256 r52 = r5[2U];
-  Lib_IntVector_Intrinsics_vec256 r53 = r5[3U];
-  Lib_IntVector_Intrinsics_vec256 r54 = r5[4U];
-  Lib_IntVector_Intrinsics_vec256 f10 = e[0U];
-  Lib_IntVector_Intrinsics_vec256 f11 = e[1U];
-  Lib_IntVector_Intrinsics_vec256 f12 = e[2U];
-  Lib_IntVector_Intrinsics_vec256 f13 = e[3U];
-  Lib_IntVector_Intrinsics_vec256 f14 = e[4U];
-  Lib_IntVector_Intrinsics_vec256 a0 = acc[0U];
-  Lib_IntVector_Intrinsics_vec256 a1 = acc[1U];
-  Lib_IntVector_Intrinsics_vec256 a2 = acc[2U];
-  Lib_IntVector_Intrinsics_vec256 a3 = acc[3U];
-  Lib_IntVector_Intrinsics_vec256 a4 = acc[4U];
-  Lib_IntVector_Intrinsics_vec256 a01 = Lib_IntVector_Intrinsics_vec256_add64(a0, f10);
-  Lib_IntVector_Intrinsics_vec256 a11 = Lib_IntVector_Intrinsics_vec256_add64(a1, f11);
-  Lib_IntVector_Intrinsics_vec256 a21 = Lib_IntVector_Intrinsics_vec256_add64(a2, f12);
-  Lib_IntVector_Intrinsics_vec256 a31 = Lib_IntVector_Intrinsics_vec256_add64(a3, f13);
-  Lib_IntVector_Intrinsics_vec256 a41 = Lib_IntVector_Intrinsics_vec256_add64(a4, f14);
-  Lib_IntVector_Intrinsics_vec256 a02 = Lib_IntVector_Intrinsics_vec256_mul64(r0, a01);
-  Lib_IntVector_Intrinsics_vec256 a12 = Lib_IntVector_Intrinsics_vec256_mul64(r1, a01);
-  Lib_IntVector_Intrinsics_vec256 a22 = Lib_IntVector_Intrinsics_vec256_mul64(r2, a01);
-  Lib_IntVector_Intrinsics_vec256 a32 = Lib_IntVector_Intrinsics_vec256_mul64(r3, a01);
-  Lib_IntVector_Intrinsics_vec256 a42 = Lib_IntVector_Intrinsics_vec256_mul64(r4, a01);
-  Lib_IntVector_Intrinsics_vec256
-  a03 =
-    Lib_IntVector_Intrinsics_vec256_add64(a02,
-      Lib_IntVector_Intrinsics_vec256_mul64(r54, a11));
-  Lib_IntVector_Intrinsics_vec256
-  a13 =
-    Lib_IntVector_Intrinsics_vec256_add64(a12,
-      Lib_IntVector_Intrinsics_vec256_mul64(r0, a11));
-  Lib_IntVector_Intrinsics_vec256
-  a23 =
-    Lib_IntVector_Intrinsics_vec256_add64(a22,
-      Lib_IntVector_Intrinsics_vec256_mul64(r1, a11));
-  Lib_IntVector_Intrinsics_vec256
-  a33 =
-    Lib_IntVector_Intrinsics_vec256_add64(a32,
-      Lib_IntVector_Intrinsics_vec256_mul64(r2, a11));
-  Lib_IntVector_Intrinsics_vec256
-  a43 =
-    Lib_IntVector_Intrinsics_vec256_add64(a42,
-      Lib_IntVector_Intrinsics_vec256_mul64(r3, a11));
-  Lib_IntVector_Intrinsics_vec256
-  a04 =
-    Lib_IntVector_Intrinsics_vec256_add64(a03,
-      Lib_IntVector_Intrinsics_vec256_mul64(r53, a21));
-  Lib_IntVector_Intrinsics_vec256
-  a14 =
-    Lib_IntVector_Intrinsics_vec256_add64(a13,
-      Lib_IntVector_Intrinsics_vec256_mul64(r54, a21));
-  Lib_IntVector_Intrinsics_vec256
-  a24 =
-    Lib_IntVector_Intrinsics_vec256_add64(a23,
-      Lib_IntVector_Intrinsics_vec256_mul64(r0, a21));
-  Lib_IntVector_Intrinsics_vec256
-  a34 =
-    Lib_IntVector_Intrinsics_vec256_add64(a33,
-      Lib_IntVector_Intrinsics_vec256_mul64(r1, a21));
-  Lib_IntVector_Intrinsics_vec256
-  a44 =
-    Lib_IntVector_Intrinsics_vec256_add64(a43,
-      Lib_IntVector_Intrinsics_vec256_mul64(r2, a21));
-  Lib_IntVector_Intrinsics_vec256
-  a05 =
-    Lib_IntVector_Intrinsics_vec256_add64(a04,
-      Lib_IntVector_Intrinsics_vec256_mul64(r52, a31));
-  Lib_IntVector_Intrinsics_vec256
-  a15 =
-    Lib_IntVector_Intrinsics_vec256_add64(a14,
-      Lib_IntVector_Intrinsics_vec256_mul64(r53, a31));
-  Lib_IntVector_Intrinsics_vec256
-  a25 =
-    Lib_IntVector_Intrinsics_vec256_add64(a24,
-      Lib_IntVector_Intrinsics_vec256_mul64(r54, a31));
-  Lib_IntVector_Intrinsics_vec256
-  a35 =
-    Lib_IntVector_Intrinsics_vec256_add64(a34,
-      Lib_IntVector_Intrinsics_vec256_mul64(r0, a31));
-  Lib_IntVector_Intrinsics_vec256
-  a45 =
-    Lib_IntVector_Intrinsics_vec256_add64(a44,
-      Lib_IntVector_Intrinsics_vec256_mul64(r1, a31));
-  Lib_IntVector_Intrinsics_vec256
-  a06 =
-    Lib_IntVector_Intrinsics_vec256_add64(a05,
-      Lib_IntVector_Intrinsics_vec256_mul64(r51, a41));
-  Lib_IntVector_Intrinsics_vec256
-  a16 =
-    Lib_IntVector_Intrinsics_vec256_add64(a15,
-      Lib_IntVector_Intrinsics_vec256_mul64(r52, a41));
-  Lib_IntVector_Intrinsics_vec256
-  a26 =
-    Lib_IntVector_Intrinsics_vec256_add64(a25,
-      Lib_IntVector_Intrinsics_vec256_mul64(r53, a41));
-  Lib_IntVector_Intrinsics_vec256
-  a36 =
-    Lib_IntVector_Intrinsics_vec256_add64(a35,
-      Lib_IntVector_Intrinsics_vec256_mul64(r54, a41));
-  Lib_IntVector_Intrinsics_vec256
-  a46 =
-    Lib_IntVector_Intrinsics_vec256_add64(a45,
-      Lib_IntVector_Intrinsics_vec256_mul64(r0, a41));
-  Lib_IntVector_Intrinsics_vec256 t0 = a06;
-  Lib_IntVector_Intrinsics_vec256 t1 = a16;
-  Lib_IntVector_Intrinsics_vec256 t2 = a26;
-  Lib_IntVector_Intrinsics_vec256 t3 = a36;
-  Lib_IntVector_Intrinsics_vec256 t4 = a46;
-  Lib_IntVector_Intrinsics_vec256
-  mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t0, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t0, mask26);
-  Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
-  Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
-  Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-  Lib_IntVector_Intrinsics_vec256
-  z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
-  Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
-  Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
-  Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
-  Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
-  Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-  Lib_IntVector_Intrinsics_vec256
-  z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256
-  z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
-  Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
-  Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
-  Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-  Lib_IntVector_Intrinsics_vec256
-  z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
-  Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
-  Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
-  Lib_IntVector_Intrinsics_vec256 o0 = x02;
-  Lib_IntVector_Intrinsics_vec256 o1 = x12;
-  Lib_IntVector_Intrinsics_vec256 o2 = x21;
-  Lib_IntVector_Intrinsics_vec256 o3 = x32;
-  Lib_IntVector_Intrinsics_vec256 o4 = x42;
-  acc[0U] = o0;
-  acc[1U] = o1;
-  acc[2U] = o2;
-  acc[3U] = o3;
-  acc[4U] = o4;
+  rn_5[0U] = Lib_IntVector_Intrinsics_vec256_smul64(f202, 5ULL);
+  rn_5[1U] = Lib_IntVector_Intrinsics_vec256_smul64(f21, 5ULL);
+  rn_5[2U] = Lib_IntVector_Intrinsics_vec256_smul64(f22, 5ULL);
+  rn_5[3U] = Lib_IntVector_Intrinsics_vec256_smul64(f23, 5ULL);
+  rn_5[4U] = Lib_IntVector_Intrinsics_vec256_smul64(f24, 5ULL);
 }
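poly1305_init above clamps the r half of the key with the masks 0x0ffffffc0fffffff and 0x0ffffffc0ffffffc and stores it, together with precomputed powers of r and their multiples by 5, in the same 26-bit limb form. The update code below loads each 16-byte message block into that representation: shift amounts 26, 52, 14 and 40 with masks 0x3ffffff and 0x3fff carve the 128-bit block into five limbs, and OR-ing 0x1000000 into the top limb sets the 2^128 padding bit of a full block. A scalar sketch of that limb layout, using a hypothetical helper name:

#include <stdint.h>
#include <string.h>

/* Split one full 16-byte Poly1305 block into five 26-bit limbs and set
 * the 2^128 padding bit. Scalar sketch of the layout the SIMD code
 * builds in each 64-bit lane; assumes a little-endian host, whereas
 * the generated code goes through load64_le. */
static void block_to_limbs_26(uint64_t e[5], const uint8_t block[16])
{
  uint64_t lo;
  uint64_t hi;
  memcpy(&lo, block, 8U);
  memcpy(&hi, block + 8U, 8U);
  e[0U] = lo & 0x3ffffffULL;                        /* block bits   0..25    */
  e[1U] = (lo >> 26U) & 0x3ffffffULL;               /* block bits  26..51    */
  e[2U] = (lo >> 52U) | ((hi & 0x3fffULL) << 12U);  /* block bits  52..77    */
  e[3U] = (hi >> 14U) & 0x3ffffffULL;               /* block bits  78..103   */
  e[4U] = (hi >> 40U) | 0x1000000ULL;               /* bits 104..127 + 2^128 */
}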
 
-void
-Hacl_Poly1305_256_poly1305_update(
-  Lib_IntVector_Intrinsics_vec256 *ctx,
-  uint32_t len,
-  uint8_t *text
-)
+static void poly1305_update(Lib_IntVector_Intrinsics_vec256 *ctx, uint32_t len, uint8_t *text)
 {
-  Lib_IntVector_Intrinsics_vec256 *pre = ctx + (uint32_t)5U;
+  Lib_IntVector_Intrinsics_vec256 *pre = ctx + 5U;
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  uint32_t sz_block = (uint32_t)64U;
+  uint32_t sz_block = 64U;
   uint32_t len0 = len / sz_block * sz_block;
   uint8_t *t0 = text;
-  if (len0 > (uint32_t)0U)
+  if (len0 > 0U)
   {
-    uint32_t bs = (uint32_t)64U;
+    uint32_t bs = 64U;
     uint8_t *text0 = t0;
-    Hacl_Impl_Poly1305_Field32xN_256_load_acc4(acc, text0);
+    Hacl_MAC_Poly1305_Simd256_load_acc4(acc, text0);
     uint32_t len1 = len0 - bs;
     uint8_t *text1 = t0 + bs;
     uint32_t nb = len1 / bs;
-    for (uint32_t i = (uint32_t)0U; i < nb; i++)
+    for (uint32_t i = 0U; i < nb; i++)
     {
       uint8_t *block = text1 + i * bs;
       KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
       Lib_IntVector_Intrinsics_vec256 lo = Lib_IntVector_Intrinsics_vec256_load64_le(block);
+      Lib_IntVector_Intrinsics_vec256 hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + 32U);
       Lib_IntVector_Intrinsics_vec256
-      hi = Lib_IntVector_Intrinsics_vec256_load64_le(block + (uint32_t)32U);
-      Lib_IntVector_Intrinsics_vec256
-      mask260 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
+      mask260 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
       Lib_IntVector_Intrinsics_vec256
       m0 = Lib_IntVector_Intrinsics_vec256_interleave_low128(lo, hi);
       Lib_IntVector_Intrinsics_vec256
       m1 = Lib_IntVector_Intrinsics_vec256_interleave_high128(lo, hi);
-      Lib_IntVector_Intrinsics_vec256
-      m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, (uint32_t)48U);
-      Lib_IntVector_Intrinsics_vec256
-      m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, (uint32_t)48U);
+      Lib_IntVector_Intrinsics_vec256 m2 = Lib_IntVector_Intrinsics_vec256_shift_right(m0, 48U);
+      Lib_IntVector_Intrinsics_vec256 m3 = Lib_IntVector_Intrinsics_vec256_shift_right(m1, 48U);
       Lib_IntVector_Intrinsics_vec256
       m4 = Lib_IntVector_Intrinsics_vec256_interleave_high64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t010 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m0, m1);
       Lib_IntVector_Intrinsics_vec256
       t30 = Lib_IntVector_Intrinsics_vec256_interleave_low64(m2, m3);
-      Lib_IntVector_Intrinsics_vec256
-      t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)4U);
+      Lib_IntVector_Intrinsics_vec256 t20 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 4U);
       Lib_IntVector_Intrinsics_vec256 o20 = Lib_IntVector_Intrinsics_vec256_and(t20, mask260);
       Lib_IntVector_Intrinsics_vec256
-      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, (uint32_t)26U);
+      t10 = Lib_IntVector_Intrinsics_vec256_shift_right64(t010, 26U);
       Lib_IntVector_Intrinsics_vec256 o10 = Lib_IntVector_Intrinsics_vec256_and(t10, mask260);
       Lib_IntVector_Intrinsics_vec256 o5 = Lib_IntVector_Intrinsics_vec256_and(t010, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, (uint32_t)30U);
+      Lib_IntVector_Intrinsics_vec256 t31 = Lib_IntVector_Intrinsics_vec256_shift_right64(t30, 30U);
       Lib_IntVector_Intrinsics_vec256 o30 = Lib_IntVector_Intrinsics_vec256_and(t31, mask260);
-      Lib_IntVector_Intrinsics_vec256
-      o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256 o40 = Lib_IntVector_Intrinsics_vec256_shift_right64(m4, 40U);
       Lib_IntVector_Intrinsics_vec256 o00 = o5;
       Lib_IntVector_Intrinsics_vec256 o11 = o10;
       Lib_IntVector_Intrinsics_vec256 o21 = o20;
@@ -1309,12 +1030,12 @@ Hacl_Poly1305_256_poly1305_update(
       e[2U] = o21;
       e[3U] = o31;
       e[4U] = o41;
-      uint64_t b = (uint64_t)0x1000000U;
+      uint64_t b = 0x1000000ULL;
       Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
       Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
       e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
-      Lib_IntVector_Intrinsics_vec256 *rn = pre + (uint32_t)10U;
-      Lib_IntVector_Intrinsics_vec256 *rn5 = pre + (uint32_t)15U;
+      Lib_IntVector_Intrinsics_vec256 *rn = pre + 10U;
+      Lib_IntVector_Intrinsics_vec256 *rn5 = pre + 15U;
       Lib_IntVector_Intrinsics_vec256 r0 = rn[0U];
       Lib_IntVector_Intrinsics_vec256 r1 = rn[1U];
       Lib_IntVector_Intrinsics_vec256 r2 = rn[2U];
@@ -1419,37 +1140,28 @@ Hacl_Poly1305_256_poly1305_update(
       Lib_IntVector_Intrinsics_vec256 t2 = a24;
       Lib_IntVector_Intrinsics_vec256 t3 = a34;
       Lib_IntVector_Intrinsics_vec256 t4 = a44;
-      Lib_IntVector_Intrinsics_vec256
-      mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-      Lib_IntVector_Intrinsics_vec256
-      z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+      Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+      Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
       Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
       Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
       Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t1, z0);
       Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-      Lib_IntVector_Intrinsics_vec256
-      z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+      Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+      Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+      Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
       Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
       Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
       Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
       Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
       Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-      Lib_IntVector_Intrinsics_vec256
-      z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-      Lib_IntVector_Intrinsics_vec256
-      z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+      Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
       Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
       Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
       Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
       Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-      Lib_IntVector_Intrinsics_vec256
-      z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
       Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
       Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
       Lib_IntVector_Intrinsics_vec256 o01 = x02;
@@ -1483,45 +1195,41 @@ Hacl_Poly1305_256_poly1305_update(
       acc[3U] = o3;
       acc[4U] = o4;
     }
-    Hacl_Impl_Poly1305_Field32xN_256_fmul_r4_normalize(acc, pre);
+    Hacl_MAC_Poly1305_Simd256_fmul_r4_normalize(acc, pre);
   }
   uint32_t len1 = len - len0;
   uint8_t *t1 = text + len0;
-  uint32_t nb = len1 / (uint32_t)16U;
-  uint32_t rem = len1 % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
+  uint32_t nb = len1 / 16U;
+  uint32_t rem = len1 % 16U;
+  for (uint32_t i = 0U; i < nb; i++)
   {
-    uint8_t *block = t1 + i * (uint32_t)16U;
+    uint8_t *block = t1 + i * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint64_t u0 = load64_le(block);
     uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
+    uint64_t u = load64_le(block + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -1532,12 +1240,12 @@ Hacl_Poly1305_256_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
+    uint64_t b = 0x1000000ULL;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
     Lib_IntVector_Intrinsics_vec256 f4 = e[4U];
     e[4U] = Lib_IntVector_Intrinsics_vec256_or(f4, mask);
     Lib_IntVector_Intrinsics_vec256 *r = pre;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1652,37 +1360,28 @@ Hacl_Poly1305_256_poly1305_update(
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1696,41 +1395,37 @@ Hacl_Poly1305_256_poly1305_update(
     acc[3U] = o3;
     acc[4U] = o4;
   }
-  if (rem > (uint32_t)0U)
+  if (rem > 0U)
   {
-    uint8_t *last = t1 + nb * (uint32_t)16U;
+    uint8_t *last = t1 + nb * 16U;
     KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 e[5U] KRML_POST_ALIGN(32) = { 0U };
     uint8_t tmp[16U] = { 0U };
     memcpy(tmp, last, rem * sizeof (uint8_t));
     uint64_t u0 = load64_le(tmp);
     uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
+    uint64_t u = load64_le(tmp + 8U);
     uint64_t hi = u;
     Lib_IntVector_Intrinsics_vec256 f0 = Lib_IntVector_Intrinsics_vec256_load64(lo);
     Lib_IntVector_Intrinsics_vec256 f1 = Lib_IntVector_Intrinsics_vec256_load64(hi);
     Lib_IntVector_Intrinsics_vec256
     f010 =
       Lib_IntVector_Intrinsics_vec256_and(f0,
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f110 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)26U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 26U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
     Lib_IntVector_Intrinsics_vec256
     f20 =
-      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0,
-          (uint32_t)52U),
+      Lib_IntVector_Intrinsics_vec256_or(Lib_IntVector_Intrinsics_vec256_shift_right64(f0, 52U),
         Lib_IntVector_Intrinsics_vec256_shift_left64(Lib_IntVector_Intrinsics_vec256_and(f1,
-            Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffU)),
-          (uint32_t)12U));
+            Lib_IntVector_Intrinsics_vec256_load64(0x3fffULL)),
+          12U));
     Lib_IntVector_Intrinsics_vec256
     f30 =
-      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1,
-          (uint32_t)14U),
-        Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-    Lib_IntVector_Intrinsics_vec256
-    f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, (uint32_t)40U);
+      Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 14U),
+        Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+    Lib_IntVector_Intrinsics_vec256 f40 = Lib_IntVector_Intrinsics_vec256_shift_right64(f1, 40U);
     Lib_IntVector_Intrinsics_vec256 f01 = f010;
     Lib_IntVector_Intrinsics_vec256 f111 = f110;
     Lib_IntVector_Intrinsics_vec256 f2 = f20;
@@ -1741,12 +1436,12 @@ Hacl_Poly1305_256_poly1305_update(
     e[2U] = f2;
     e[3U] = f3;
     e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
+    uint64_t b = 1ULL << rem * 8U % 26U;
     Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_load64(b);
-    Lib_IntVector_Intrinsics_vec256 fi = e[rem * (uint32_t)8U / (uint32_t)26U];
-    e[rem * (uint32_t)8U / (uint32_t)26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
+    Lib_IntVector_Intrinsics_vec256 fi = e[rem * 8U / 26U];
+    e[rem * 8U / 26U] = Lib_IntVector_Intrinsics_vec256_or(fi, mask);
     Lib_IntVector_Intrinsics_vec256 *r = pre;
-    Lib_IntVector_Intrinsics_vec256 *r5 = pre + (uint32_t)5U;
+    Lib_IntVector_Intrinsics_vec256 *r5 = pre + 5U;
     Lib_IntVector_Intrinsics_vec256 r0 = r[0U];
     Lib_IntVector_Intrinsics_vec256 r1 = r[1U];
     Lib_IntVector_Intrinsics_vec256 r2 = r[2U];
@@ -1861,37 +1556,28 @@ Hacl_Poly1305_256_poly1305_update(
     Lib_IntVector_Intrinsics_vec256 t2 = a26;
     Lib_IntVector_Intrinsics_vec256 t3 = a36;
     Lib_IntVector_Intrinsics_vec256 t4 = a46;
-    Lib_IntVector_Intrinsics_vec256
-    mask26 = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-    Lib_IntVector_Intrinsics_vec256
-    z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 mask26 = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+    Lib_IntVector_Intrinsics_vec256 z0 = Lib_IntVector_Intrinsics_vec256_shift_right64(t01, 26U);
+    Lib_IntVector_Intrinsics_vec256 z1 = Lib_IntVector_Intrinsics_vec256_shift_right64(t3, 26U);
     Lib_IntVector_Intrinsics_vec256 x0 = Lib_IntVector_Intrinsics_vec256_and(t01, mask26);
     Lib_IntVector_Intrinsics_vec256 x3 = Lib_IntVector_Intrinsics_vec256_and(t3, mask26);
     Lib_IntVector_Intrinsics_vec256 x1 = Lib_IntVector_Intrinsics_vec256_add64(t11, z0);
     Lib_IntVector_Intrinsics_vec256 x4 = Lib_IntVector_Intrinsics_vec256_add64(t4, z1);
-    Lib_IntVector_Intrinsics_vec256
-    z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, (uint32_t)2U);
+    Lib_IntVector_Intrinsics_vec256 z01 = Lib_IntVector_Intrinsics_vec256_shift_right64(x1, 26U);
+    Lib_IntVector_Intrinsics_vec256 z11 = Lib_IntVector_Intrinsics_vec256_shift_right64(x4, 26U);
+    Lib_IntVector_Intrinsics_vec256 t = Lib_IntVector_Intrinsics_vec256_shift_left64(z11, 2U);
     Lib_IntVector_Intrinsics_vec256 z12 = Lib_IntVector_Intrinsics_vec256_add64(z11, t);
     Lib_IntVector_Intrinsics_vec256 x11 = Lib_IntVector_Intrinsics_vec256_and(x1, mask26);
     Lib_IntVector_Intrinsics_vec256 x41 = Lib_IntVector_Intrinsics_vec256_and(x4, mask26);
     Lib_IntVector_Intrinsics_vec256 x2 = Lib_IntVector_Intrinsics_vec256_add64(t2, z01);
     Lib_IntVector_Intrinsics_vec256 x01 = Lib_IntVector_Intrinsics_vec256_add64(x0, z12);
-    Lib_IntVector_Intrinsics_vec256
-    z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, (uint32_t)26U);
-    Lib_IntVector_Intrinsics_vec256
-    z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z02 = Lib_IntVector_Intrinsics_vec256_shift_right64(x2, 26U);
+    Lib_IntVector_Intrinsics_vec256 z13 = Lib_IntVector_Intrinsics_vec256_shift_right64(x01, 26U);
     Lib_IntVector_Intrinsics_vec256 x21 = Lib_IntVector_Intrinsics_vec256_and(x2, mask26);
     Lib_IntVector_Intrinsics_vec256 x02 = Lib_IntVector_Intrinsics_vec256_and(x01, mask26);
     Lib_IntVector_Intrinsics_vec256 x31 = Lib_IntVector_Intrinsics_vec256_add64(x3, z02);
     Lib_IntVector_Intrinsics_vec256 x12 = Lib_IntVector_Intrinsics_vec256_add64(x11, z13);
-    Lib_IntVector_Intrinsics_vec256
-    z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, (uint32_t)26U);
+    Lib_IntVector_Intrinsics_vec256 z03 = Lib_IntVector_Intrinsics_vec256_shift_right64(x31, 26U);
     Lib_IntVector_Intrinsics_vec256 x32 = Lib_IntVector_Intrinsics_vec256_and(x31, mask26);
     Lib_IntVector_Intrinsics_vec256 x42 = Lib_IntVector_Intrinsics_vec256_add64(x41, z03);
     Lib_IntVector_Intrinsics_vec256 o0 = x02;
@@ -1909,14 +1595,14 @@ Hacl_Poly1305_256_poly1305_update(
 }
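The finish routine below runs a last carry pass, conditionally subtracts p = 2^130 - 5 once the accumulator has reached it (the constants 0x3ffffff and 0x3fffffb loaded into mh and ml are exactly p's limbs), repacks the five limbs into a 128-bit value, adds the s half of the key modulo 2^128 and stores the 16-byte tag little-endian. The sketch below covers only the repacking and the branchless final addition, with hypothetical names; the limbs are assumed already reduced below p.

#include <stdint.h>

/* Repack five reduced 26-bit limbs into a 128-bit value (lo, hi) and
 * add the 128-bit key part s modulo 2^128, mirroring the tag
 * computation at the end of poly1305_finish. */
static void limbs_to_tag_words(
  uint64_t *lo,
  uint64_t *hi,
  const uint64_t f[5],
  uint64_t s_lo,
  uint64_t s_hi
)
{
  uint64_t acc_lo = (f[0U] | f[1U] << 26U) | f[2U] << 52U;
  uint64_t acc_hi = (f[2U] >> 12U | f[3U] << 14U) | f[4U] << 40U;
  uint64_t r0 = acc_lo + s_lo;
  uint64_t r1 = acc_hi + s_hi;
  /* branchless carry out of the low 64-bit word, same trick as the
   * generated code: c is 1 exactly when the addition wrapped below s_lo */
  uint64_t c = (r0 ^ ((r0 ^ s_lo) | ((r0 - s_lo) ^ s_lo))) >> 63U;
  *lo = r0;
  *hi = r1 + c;
}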
 
 void
-Hacl_Poly1305_256_poly1305_finish(
+Hacl_MAC_Poly1305_Simd256_poly1305_finish(
   uint8_t *tag,
   uint8_t *key,
   Lib_IntVector_Intrinsics_vec256 *ctx
 )
 {
   Lib_IntVector_Intrinsics_vec256 *acc = ctx;
-  uint8_t *ks = key + (uint32_t)16U;
+  uint8_t *ks = key + 16U;
   Lib_IntVector_Intrinsics_vec256 f0 = acc[0U];
   Lib_IntVector_Intrinsics_vec256 f13 = acc[1U];
   Lib_IntVector_Intrinsics_vec256 f23 = acc[2U];
@@ -1927,41 +1613,36 @@ Hacl_Poly1305_256_poly1305_finish(
   Lib_IntVector_Intrinsics_vec256
   tmp00 =
     Lib_IntVector_Intrinsics_vec256_and(l0,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c00 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c00 = Lib_IntVector_Intrinsics_vec256_shift_right64(l0, 26U);
   Lib_IntVector_Intrinsics_vec256 l1 = Lib_IntVector_Intrinsics_vec256_add64(f13, c00);
   Lib_IntVector_Intrinsics_vec256
   tmp10 =
     Lib_IntVector_Intrinsics_vec256_and(l1,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c10 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c10 = Lib_IntVector_Intrinsics_vec256_shift_right64(l1, 26U);
   Lib_IntVector_Intrinsics_vec256 l2 = Lib_IntVector_Intrinsics_vec256_add64(f23, c10);
   Lib_IntVector_Intrinsics_vec256
   tmp20 =
     Lib_IntVector_Intrinsics_vec256_and(l2,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c20 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c20 = Lib_IntVector_Intrinsics_vec256_shift_right64(l2, 26U);
   Lib_IntVector_Intrinsics_vec256 l3 = Lib_IntVector_Intrinsics_vec256_add64(f33, c20);
   Lib_IntVector_Intrinsics_vec256
   tmp30 =
     Lib_IntVector_Intrinsics_vec256_and(l3,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c30 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c30 = Lib_IntVector_Intrinsics_vec256_shift_right64(l3, 26U);
   Lib_IntVector_Intrinsics_vec256 l4 = Lib_IntVector_Intrinsics_vec256_add64(f40, c30);
   Lib_IntVector_Intrinsics_vec256
   tmp40 =
     Lib_IntVector_Intrinsics_vec256_and(l4,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c40 = Lib_IntVector_Intrinsics_vec256_shift_right64(l4, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c40 = Lib_IntVector_Intrinsics_vec256_shift_right64(l4, 26U);
   Lib_IntVector_Intrinsics_vec256
   f010 =
     Lib_IntVector_Intrinsics_vec256_add64(tmp00,
-      Lib_IntVector_Intrinsics_vec256_smul64(c40, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec256_smul64(c40, 5ULL));
   Lib_IntVector_Intrinsics_vec256 f110 = tmp10;
   Lib_IntVector_Intrinsics_vec256 f210 = tmp20;
   Lib_IntVector_Intrinsics_vec256 f310 = tmp30;
@@ -1971,49 +1652,42 @@ Hacl_Poly1305_256_poly1305_finish(
   Lib_IntVector_Intrinsics_vec256
   tmp0 =
     Lib_IntVector_Intrinsics_vec256_and(l,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c0 = Lib_IntVector_Intrinsics_vec256_shift_right64(l, 26U);
   Lib_IntVector_Intrinsics_vec256 l5 = Lib_IntVector_Intrinsics_vec256_add64(f110, c0);
   Lib_IntVector_Intrinsics_vec256
   tmp1 =
     Lib_IntVector_Intrinsics_vec256_and(l5,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l5, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c1 = Lib_IntVector_Intrinsics_vec256_shift_right64(l5, 26U);
   Lib_IntVector_Intrinsics_vec256 l6 = Lib_IntVector_Intrinsics_vec256_add64(f210, c1);
   Lib_IntVector_Intrinsics_vec256
   tmp2 =
     Lib_IntVector_Intrinsics_vec256_and(l6,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l6, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c2 = Lib_IntVector_Intrinsics_vec256_shift_right64(l6, 26U);
   Lib_IntVector_Intrinsics_vec256 l7 = Lib_IntVector_Intrinsics_vec256_add64(f310, c2);
   Lib_IntVector_Intrinsics_vec256
   tmp3 =
     Lib_IntVector_Intrinsics_vec256_and(l7,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l7, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c3 = Lib_IntVector_Intrinsics_vec256_shift_right64(l7, 26U);
   Lib_IntVector_Intrinsics_vec256 l8 = Lib_IntVector_Intrinsics_vec256_add64(f410, c3);
   Lib_IntVector_Intrinsics_vec256
   tmp4 =
     Lib_IntVector_Intrinsics_vec256_and(l8,
-      Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU));
-  Lib_IntVector_Intrinsics_vec256
-  c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l8, (uint32_t)26U);
+      Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL));
+  Lib_IntVector_Intrinsics_vec256 c4 = Lib_IntVector_Intrinsics_vec256_shift_right64(l8, 26U);
   Lib_IntVector_Intrinsics_vec256
   f02 =
     Lib_IntVector_Intrinsics_vec256_add64(tmp0,
-      Lib_IntVector_Intrinsics_vec256_smul64(c4, (uint64_t)5U));
+      Lib_IntVector_Intrinsics_vec256_smul64(c4, 5ULL));
   Lib_IntVector_Intrinsics_vec256 f12 = tmp1;
   Lib_IntVector_Intrinsics_vec256 f22 = tmp2;
   Lib_IntVector_Intrinsics_vec256 f32 = tmp3;
   Lib_IntVector_Intrinsics_vec256 f42 = tmp4;
-  Lib_IntVector_Intrinsics_vec256
-  mh = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3ffffffU);
-  Lib_IntVector_Intrinsics_vec256
-  ml = Lib_IntVector_Intrinsics_vec256_load64((uint64_t)0x3fffffbU);
+  Lib_IntVector_Intrinsics_vec256 mh = Lib_IntVector_Intrinsics_vec256_load64(0x3ffffffULL);
+  Lib_IntVector_Intrinsics_vec256 ml = Lib_IntVector_Intrinsics_vec256_load64(0x3fffffbULL);
   Lib_IntVector_Intrinsics_vec256 mask = Lib_IntVector_Intrinsics_vec256_eq64(f42, mh);
   Lib_IntVector_Intrinsics_vec256
   mask1 =
@@ -2053,36 +1727,334 @@ Hacl_Poly1305_256_poly1305_finish(
   Lib_IntVector_Intrinsics_vec256 f2 = acc[2U];
   Lib_IntVector_Intrinsics_vec256 f3 = acc[3U];
   Lib_IntVector_Intrinsics_vec256 f4 = acc[4U];
-  uint64_t f01 = Lib_IntVector_Intrinsics_vec256_extract64(f00, (uint32_t)0U);
-  uint64_t f112 = Lib_IntVector_Intrinsics_vec256_extract64(f1, (uint32_t)0U);
-  uint64_t f212 = Lib_IntVector_Intrinsics_vec256_extract64(f2, (uint32_t)0U);
-  uint64_t f312 = Lib_IntVector_Intrinsics_vec256_extract64(f3, (uint32_t)0U);
-  uint64_t f41 = Lib_IntVector_Intrinsics_vec256_extract64(f4, (uint32_t)0U);
-  uint64_t lo = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U;
-  uint64_t hi = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U;
+  uint64_t f01 = Lib_IntVector_Intrinsics_vec256_extract64(f00, 0U);
+  uint64_t f112 = Lib_IntVector_Intrinsics_vec256_extract64(f1, 0U);
+  uint64_t f212 = Lib_IntVector_Intrinsics_vec256_extract64(f2, 0U);
+  uint64_t f312 = Lib_IntVector_Intrinsics_vec256_extract64(f3, 0U);
+  uint64_t f41 = Lib_IntVector_Intrinsics_vec256_extract64(f4, 0U);
+  uint64_t lo = (f01 | f112 << 26U) | f212 << 52U;
+  uint64_t hi = (f212 >> 12U | f312 << 14U) | f41 << 40U;
   uint64_t f10 = lo;
   uint64_t f11 = hi;
   uint64_t u0 = load64_le(ks);
   uint64_t lo0 = u0;
-  uint64_t u = load64_le(ks + (uint32_t)8U);
+  uint64_t u = load64_le(ks + 8U);
   uint64_t hi0 = u;
   uint64_t f20 = lo0;
   uint64_t f21 = hi0;
   uint64_t r0 = f10 + f20;
   uint64_t r1 = f11 + f21;
-  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U;
+  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> 63U;
   uint64_t r11 = r1 + c;
   uint64_t f30 = r0;
   uint64_t f31 = r11;
   store64_le(tag, f30);
-  store64_le(tag + (uint32_t)8U, f31);
+  store64_le(tag + 8U, f31);
+}
+
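Note for readers of this hunk (not part of the patch): the carry chain in the finish routine above keeps the Poly1305 accumulator in five 26-bit limbs, masking each limb with 0x3ffffff and folding any carry out of the top limb back into the lowest limb multiplied by 5, because 2^130 is congruent to 5 modulo 2^130 - 5. A minimal scalar sketch of that reduction step, with hypothetical names and no claim to match HACL* internals, is:

#include <stdint.h>

/* One scalar carry-propagation round over five 26-bit Poly1305 limbs. */
static void poly1305_carry_round(uint64_t f[5])
{
  uint64_t mask26 = 0x3ffffffULL; /* low 26 bits of a limb */
  uint64_t c = 0ULL;
  for (int i = 0; i < 5; i++)
  {
    uint64_t l = f[i] + c;  /* add the incoming carry */
    f[i] = l & mask26;      /* keep the low 26 bits */
    c = l >> 26U;           /* propagate the rest upward */
  }
  f[0U] = f[0U] + c * 5ULL; /* 2^130 == 5 (mod 2^130 - 5) */
}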
+Hacl_MAC_Poly1305_Simd256_state_t *Hacl_MAC_Poly1305_Simd256_malloc(uint8_t *key)
+{
+  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC(64U, sizeof (uint8_t));
+  Lib_IntVector_Intrinsics_vec256
+  *r1 =
+    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
+      sizeof (Lib_IntVector_Intrinsics_vec256) * 25U);
+  memset(r1, 0U, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Lib_IntVector_Intrinsics_vec256 *block_state = r1;
+  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC(32U, sizeof (uint8_t));
+  memcpy(k_, key, 32U * sizeof (uint8_t));
+  uint8_t *k_0 = k_;
+  Hacl_MAC_Poly1305_Simd256_state_t
+  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_0 };
+  Hacl_MAC_Poly1305_Simd256_state_t
+  *p =
+    (Hacl_MAC_Poly1305_Simd256_state_t *)KRML_HOST_MALLOC(sizeof (
+        Hacl_MAC_Poly1305_Simd256_state_t
+      ));
+  p[0U] = s;
+  Hacl_MAC_Poly1305_Simd256_poly1305_init(block_state, key);
+  return p;
+}
+
+void Hacl_MAC_Poly1305_Simd256_reset(Hacl_MAC_Poly1305_Simd256_state_t *state, uint8_t *key)
+{
+  Hacl_MAC_Poly1305_Simd256_state_t scrut = *state;
+  uint8_t *k_ = scrut.p_key;
+  uint8_t *buf = scrut.buf;
+  Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
+  Hacl_MAC_Poly1305_Simd256_poly1305_init(block_state, key);
+  memcpy(k_, key, 32U * sizeof (uint8_t));
+  uint8_t *k_1 = k_;
+  Hacl_MAC_Poly1305_Simd256_state_t
+  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)0U, .p_key = k_1 };
+  state[0U] = tmp;
 }
 
-void Hacl_Poly1305_256_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key)
+/**
+Returns Hacl_Streaming_Types_Success on success, and
+Hacl_Streaming_Types_MaximumLengthExceeded if the maximum total input length
+would be exceeded.
+*/
+Hacl_Streaming_Types_error_code
+Hacl_MAC_Poly1305_Simd256_update(
+  Hacl_MAC_Poly1305_Simd256_state_t *state,
+  uint8_t *chunk,
+  uint32_t chunk_len
+)
+{
+  Hacl_MAC_Poly1305_Simd256_state_t s = *state;
+  uint64_t total_len = s.total_len;
+  if ((uint64_t)chunk_len > 0xffffffffULL - total_len)
+  {
+    return Hacl_Streaming_Types_MaximumLengthExceeded;
+  }
+  uint32_t sz;
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
+  {
+    sz = 64U;
+  }
+  else
+  {
+    sz = (uint32_t)(total_len % (uint64_t)64U);
+  }
+  if (chunk_len <= 64U - sz)
+  {
+    Hacl_MAC_Poly1305_Simd256_state_t s1 = *state;
+    Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    uint8_t *buf2 = buf + sz1;
+    memcpy(buf2, chunk, chunk_len * sizeof (uint8_t));
+    uint64_t total_len2 = total_len1 + (uint64_t)chunk_len;
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd256_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len2,
+          .p_key = k_1
+        }
+      );
+  }
+  else if (sz == 0U)
+  {
+    Hacl_MAC_Poly1305_Simd256_state_t s1 = *state;
+    Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state;
+    uint8_t *buf = s1.buf;
+    uint64_t total_len1 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    if (!(sz1 == 0U))
+    {
+      poly1305_update(block_state1, 64U, buf);
+    }
+    uint32_t ite;
+    if ((uint64_t)chunk_len % (uint64_t)64U == 0ULL && (uint64_t)chunk_len > 0ULL)
+    {
+      ite = 64U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)chunk_len % (uint64_t)64U);
+    }
+    uint32_t n_blocks = (chunk_len - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - data1_len;
+    uint8_t *data1 = chunk;
+    uint8_t *data2 = chunk + data1_len;
+    poly1305_update(block_state1, data1_len, data1);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd256_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)chunk_len,
+          .p_key = k_1
+        }
+      );
+  }
+  else
+  {
+    uint32_t diff = 64U - sz;
+    uint8_t *chunk1 = chunk;
+    uint8_t *chunk2 = chunk + diff;
+    Hacl_MAC_Poly1305_Simd256_state_t s1 = *state;
+    Lib_IntVector_Intrinsics_vec256 *block_state10 = s1.block_state;
+    uint8_t *buf0 = s1.buf;
+    uint64_t total_len10 = s1.total_len;
+    uint8_t *k_1 = s1.p_key;
+    uint32_t sz10;
+    if (total_len10 % (uint64_t)64U == 0ULL && total_len10 > 0ULL)
+    {
+      sz10 = 64U;
+    }
+    else
+    {
+      sz10 = (uint32_t)(total_len10 % (uint64_t)64U);
+    }
+    uint8_t *buf2 = buf0 + sz10;
+    memcpy(buf2, chunk1, diff * sizeof (uint8_t));
+    uint64_t total_len2 = total_len10 + (uint64_t)diff;
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd256_state_t){
+          .block_state = block_state10,
+          .buf = buf0,
+          .total_len = total_len2,
+          .p_key = k_1
+        }
+      );
+    Hacl_MAC_Poly1305_Simd256_state_t s10 = *state;
+    Lib_IntVector_Intrinsics_vec256 *block_state1 = s10.block_state;
+    uint8_t *buf = s10.buf;
+    uint64_t total_len1 = s10.total_len;
+    uint8_t *k_10 = s10.p_key;
+    uint32_t sz1;
+    if (total_len1 % (uint64_t)64U == 0ULL && total_len1 > 0ULL)
+    {
+      sz1 = 64U;
+    }
+    else
+    {
+      sz1 = (uint32_t)(total_len1 % (uint64_t)64U);
+    }
+    if (!(sz1 == 0U))
+    {
+      poly1305_update(block_state1, 64U, buf);
+    }
+    uint32_t ite;
+    if
+    ((uint64_t)(chunk_len - diff) % (uint64_t)64U == 0ULL && (uint64_t)(chunk_len - diff) > 0ULL)
+    {
+      ite = 64U;
+    }
+    else
+    {
+      ite = (uint32_t)((uint64_t)(chunk_len - diff) % (uint64_t)64U);
+    }
+    uint32_t n_blocks = (chunk_len - diff - ite) / 64U;
+    uint32_t data1_len = n_blocks * 64U;
+    uint32_t data2_len = chunk_len - diff - data1_len;
+    uint8_t *data1 = chunk2;
+    uint8_t *data2 = chunk2 + data1_len;
+    poly1305_update(block_state1, data1_len, data1);
+    uint8_t *dst = buf;
+    memcpy(dst, data2, data2_len * sizeof (uint8_t));
+    *state
+    =
+      (
+        (Hacl_MAC_Poly1305_Simd256_state_t){
+          .block_state = block_state1,
+          .buf = buf,
+          .total_len = total_len1 + (uint64_t)(chunk_len - diff),
+          .p_key = k_10
+        }
+      );
+  }
+  return Hacl_Streaming_Types_Success;
+}
+
+void
+Hacl_MAC_Poly1305_Simd256_digest(Hacl_MAC_Poly1305_Simd256_state_t *state, uint8_t *output)
+{
+  Hacl_MAC_Poly1305_Simd256_state_t scrut = *state;
+  Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
+  uint8_t *buf_ = scrut.buf;
+  uint64_t total_len = scrut.total_len;
+  uint8_t *k_ = scrut.p_key;
+  uint32_t r;
+  if (total_len % (uint64_t)64U == 0ULL && total_len > 0ULL)
+  {
+    r = 64U;
+  }
+  else
+  {
+    r = (uint32_t)(total_len % (uint64_t)64U);
+  }
+  uint8_t *buf_1 = buf_;
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 r1[25U] KRML_POST_ALIGN(32) = { 0U };
+  Lib_IntVector_Intrinsics_vec256 *tmp_block_state = r1;
+  memcpy(tmp_block_state, block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  uint32_t ite0;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite0 = 16U;
+  }
+  else
+  {
+    ite0 = r % 16U;
+  }
+  uint8_t *buf_last = buf_1 + r - ite0;
+  uint8_t *buf_multi = buf_1;
+  uint32_t ite;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite = 16U;
+  }
+  else
+  {
+    ite = r % 16U;
+  }
+  poly1305_update(tmp_block_state, r - ite, buf_multi);
+  uint32_t ite1;
+  if (r % 16U == 0U && r > 0U)
+  {
+    ite1 = 16U;
+  }
+  else
+  {
+    ite1 = r % 16U;
+  }
+  poly1305_update(tmp_block_state, ite1, buf_last);
+  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 tmp[25U] KRML_POST_ALIGN(32) = { 0U };
+  memcpy(tmp, tmp_block_state, 25U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  Hacl_MAC_Poly1305_Simd256_poly1305_finish(output, k_, tmp);
+}
+
+void Hacl_MAC_Poly1305_Simd256_free(Hacl_MAC_Poly1305_Simd256_state_t *state)
+{
+  Hacl_MAC_Poly1305_Simd256_state_t scrut = *state;
+  uint8_t *k_ = scrut.p_key;
+  uint8_t *buf = scrut.buf;
+  Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
+  KRML_HOST_FREE(k_);
+  KRML_ALIGNED_FREE(block_state);
+  KRML_HOST_FREE(buf);
+  KRML_HOST_FREE(state);
+}
+
+void
+Hacl_MAC_Poly1305_Simd256_mac(
+  uint8_t *output,
+  uint8_t *input,
+  uint32_t input_len,
+  uint8_t *key
+)
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ctx[25U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Poly1305_256_poly1305_init(ctx, key);
-  Hacl_Poly1305_256_poly1305_update(ctx, len, text);
-  Hacl_Poly1305_256_poly1305_finish(tag, key, ctx);
+  Hacl_MAC_Poly1305_Simd256_poly1305_init(ctx, key);
+  poly1305_update(ctx, input_len, input);
+  Hacl_MAC_Poly1305_Simd256_poly1305_finish(output, key, ctx);
 }
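Usage sketch (not part of the patch): the renamed streaming MAC API added in this file can be exercised end to end as below. The header name is an assumption; the function names, 32-byte key, and 16-byte tag follow the definitions above.

#include <stdint.h>
#include "Hacl_MAC_Poly1305_Simd256.h" /* assumed header name */

/* Compute a Poly1305 tag over a message fed in two chunks. */
void mac_two_chunks(uint8_t tag[16], uint8_t *msg, uint32_t len, uint8_t key[32])
{
  Hacl_MAC_Poly1305_Simd256_state_t *st = Hacl_MAC_Poly1305_Simd256_malloc(key);
  uint32_t half = len / 2U;
  /* update only fails when the running total would exceed the maximum length */
  (void)Hacl_MAC_Poly1305_Simd256_update(st, msg, half);
  (void)Hacl_MAC_Poly1305_Simd256_update(st, msg + half, len - half);
  Hacl_MAC_Poly1305_Simd256_digest(st, tag);
  Hacl_MAC_Poly1305_Simd256_free(st);
  /* One-shot equivalent: Hacl_MAC_Poly1305_Simd256_mac(tag, msg, len, key); */
}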
 
diff --git a/src/msvc/Hacl_NaCl.c b/src/msvc/Hacl_NaCl.c
index 37104040..a1bbd25c 100644
--- a/src/msvc/Hacl_NaCl.c
+++ b/src/msvc/Hacl_NaCl.c
@@ -30,9 +30,9 @@
 static void secretbox_init(uint8_t *xkeys, uint8_t *k, uint8_t *n)
 {
   uint8_t *subkey = xkeys;
-  uint8_t *aekey = xkeys + (uint32_t)32U;
+  uint8_t *aekey = xkeys + 32U;
   uint8_t *n0 = n;
-  uint8_t *n1 = n + (uint32_t)16U;
+  uint8_t *n1 = n + 16U;
   Hacl_Salsa20_hsalsa20(subkey, k, n0);
   Hacl_Salsa20_salsa20_key_block0(aekey, subkey, n1);
 }
@@ -42,35 +42,35 @@ secretbox_detached(uint32_t mlen, uint8_t *c, uint8_t *tag, uint8_t *k, uint8_t
 {
   uint8_t xkeys[96U] = { 0U };
   secretbox_init(xkeys, k, n);
-  uint8_t *mkey = xkeys + (uint32_t)32U;
-  uint8_t *n1 = n + (uint32_t)16U;
+  uint8_t *mkey = xkeys + 32U;
+  uint8_t *n1 = n + 16U;
   uint8_t *subkey = xkeys;
-  uint8_t *ekey0 = xkeys + (uint32_t)64U;
+  uint8_t *ekey0 = xkeys + 64U;
   uint32_t mlen0;
-  if (mlen <= (uint32_t)32U)
+  if (mlen <= 32U)
   {
     mlen0 = mlen;
   }
   else
   {
-    mlen0 = (uint32_t)32U;
+    mlen0 = 32U;
   }
   uint32_t mlen1 = mlen - mlen0;
   uint8_t *m0 = m;
   uint8_t *m1 = m + mlen0;
   uint8_t block0[32U] = { 0U };
   memcpy(block0, m0, mlen0 * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     uint8_t *os = block0;
-    uint8_t x = block0[i] ^ ekey0[i];
+    uint8_t x = (uint32_t)block0[i] ^ (uint32_t)ekey0[i];
     os[i] = x;
   }
   uint8_t *c0 = c;
   uint8_t *c1 = c + mlen0;
   memcpy(c0, block0, mlen0 * sizeof (uint8_t));
-  Hacl_Salsa20_salsa20_encrypt(mlen1, c1, m1, subkey, n1, (uint32_t)1U);
-  Hacl_Poly1305_32_poly1305_mac(tag, mlen, c, mkey);
+  Hacl_Salsa20_salsa20_encrypt(mlen1, c1, m1, subkey, n1, 1U);
+  Hacl_MAC_Poly1305_mac(tag, c, mlen, mkey);
 }
 
 static uint32_t
@@ -85,55 +85,55 @@ secretbox_open_detached(
 {
   uint8_t xkeys[96U] = { 0U };
   secretbox_init(xkeys, k, n);
-  uint8_t *mkey = xkeys + (uint32_t)32U;
+  uint8_t *mkey = xkeys + 32U;
   uint8_t tag_[16U] = { 0U };
-  Hacl_Poly1305_32_poly1305_mac(tag_, mlen, c, mkey);
-  uint8_t res = (uint8_t)255U;
+  Hacl_MAC_Poly1305_mac(tag_, c, mlen, mkey);
+  uint8_t res = 255U;
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint8_t uu____0 = FStar_UInt8_eq_mask(tag[i], tag_[i]);
-    res = uu____0 & res;);
+    res = (uint32_t)uu____0 & (uint32_t)res;);
   uint8_t z = res;
-  if (z == (uint8_t)255U)
+  if (z == 255U)
   {
     uint8_t *subkey = xkeys;
-    uint8_t *ekey0 = xkeys + (uint32_t)64U;
-    uint8_t *n1 = n + (uint32_t)16U;
+    uint8_t *ekey0 = xkeys + 64U;
+    uint8_t *n1 = n + 16U;
     uint32_t mlen0;
-    if (mlen <= (uint32_t)32U)
+    if (mlen <= 32U)
     {
       mlen0 = mlen;
     }
     else
     {
-      mlen0 = (uint32_t)32U;
+      mlen0 = 32U;
     }
     uint32_t mlen1 = mlen - mlen0;
     uint8_t *c0 = c;
     uint8_t *c1 = c + mlen0;
     uint8_t block0[32U] = { 0U };
     memcpy(block0, c0, mlen0 * sizeof (uint8_t));
-    for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+    for (uint32_t i = 0U; i < 32U; i++)
     {
       uint8_t *os = block0;
-      uint8_t x = block0[i] ^ ekey0[i];
+      uint8_t x = (uint32_t)block0[i] ^ (uint32_t)ekey0[i];
       os[i] = x;
     }
     uint8_t *m0 = m;
     uint8_t *m1 = m + mlen0;
     memcpy(m0, block0, mlen0 * sizeof (uint8_t));
-    Hacl_Salsa20_salsa20_decrypt(mlen1, m1, c1, subkey, n1, (uint32_t)1U);
-    return (uint32_t)0U;
+    Hacl_Salsa20_salsa20_decrypt(mlen1, m1, c1, subkey, n1, 1U);
+    return 0U;
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static void secretbox_easy(uint32_t mlen, uint8_t *c, uint8_t *k, uint8_t *n, uint8_t *m)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   secretbox_detached(mlen, cip, tag, k, n, m);
 }
 
@@ -141,7 +141,7 @@ static uint32_t
 secretbox_open_easy(uint32_t mlen, uint8_t *m, uint8_t *k, uint8_t *n, uint8_t *c)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   return secretbox_open_detached(mlen, m, k, n, cip, tag);
 }
 
@@ -152,9 +152,9 @@ static inline uint32_t box_beforenm(uint8_t *k, uint8_t *pk, uint8_t *sk)
   if (r)
   {
     Hacl_Salsa20_hsalsa20(k, k, n0);
-    return (uint32_t)0U;
+    return 0U;
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static inline uint32_t
@@ -168,7 +168,7 @@ box_detached_afternm(
 )
 {
   secretbox_detached(mlen, c, tag, k, n, m);
-  return (uint32_t)0U;
+  return 0U;
 }
 
 static inline uint32_t
@@ -184,11 +184,11 @@ box_detached(
 {
   uint8_t k[32U] = { 0U };
   uint32_t r = box_beforenm(k, pk, sk);
-  if (r == (uint32_t)0U)
+  if (r == 0U)
   {
     return box_detached_afternm(mlen, c, tag, k, n, m);
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static inline uint32_t
@@ -217,18 +217,18 @@ box_open_detached(
 {
   uint8_t k[32U] = { 0U };
   uint32_t r = box_beforenm(k, pk, sk);
-  if (r == (uint32_t)0U)
+  if (r == 0U)
   {
     return box_open_detached_afternm(mlen, m, k, n, c, tag);
   }
-  return (uint32_t)0xffffffffU;
+  return 0xffffffffU;
 }
 
 static inline uint32_t
 box_easy_afternm(uint32_t mlen, uint8_t *c, uint8_t *k, uint8_t *n, uint8_t *m)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   uint32_t res = box_detached_afternm(mlen, cip, tag, k, n, m);
   return res;
 }
@@ -237,7 +237,7 @@ static inline uint32_t
 box_easy(uint32_t mlen, uint8_t *c, uint8_t *sk, uint8_t *pk, uint8_t *n, uint8_t *m)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   uint32_t res = box_detached(mlen, cip, tag, sk, pk, n, m);
   return res;
 }
@@ -246,7 +246,7 @@ static inline uint32_t
 box_open_easy_afternm(uint32_t mlen, uint8_t *m, uint8_t *k, uint8_t *n, uint8_t *c)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   return box_open_detached_afternm(mlen, m, k, n, cip, tag);
 }
 
@@ -254,7 +254,7 @@ static inline uint32_t
 box_open_easy(uint32_t mlen, uint8_t *m, uint8_t *pk, uint8_t *sk, uint8_t *n, uint8_t *c)
 {
   uint8_t *tag = c;
-  uint8_t *cip = c + (uint32_t)16U;
+  uint8_t *cip = c + 16U;
   return box_open_detached(mlen, m, pk, sk, n, cip, tag);
 }
 
@@ -281,7 +281,7 @@ Hacl_NaCl_crypto_secretbox_detached(
 )
 {
   secretbox_detached(mlen, c, tag, k, n, m);
-  return (uint32_t)0U;
+  return 0U;
 }
 
 /**
@@ -322,7 +322,7 @@ uint32_t
 Hacl_NaCl_crypto_secretbox_easy(uint8_t *c, uint8_t *m, uint32_t mlen, uint8_t *n, uint8_t *k)
 {
   secretbox_easy(mlen, c, k, n, m);
-  return (uint32_t)0U;
+  return 0U;
 }
 
 /**
@@ -343,7 +343,7 @@ Hacl_NaCl_crypto_secretbox_open_easy(
   uint8_t *k
 )
 {
-  return secretbox_open_easy(clen - (uint32_t)16U, m, k, n, c);
+  return secretbox_open_easy(clen - 16U, m, k, n, c);
 }
 
 /**
@@ -490,7 +490,7 @@ Hacl_NaCl_crypto_box_open_easy_afternm(
   uint8_t *k
 )
 {
-  return box_open_easy_afternm(clen - (uint32_t)16U, m, k, n, c);
+  return box_open_easy_afternm(clen - 16U, m, k, n, c);
 }
 
 /**
@@ -513,6 +513,6 @@ Hacl_NaCl_crypto_box_open_easy(
   uint8_t *sk
 )
 {
-  return box_open_easy(clen - (uint32_t)16U, m, pk, sk, n, c);
+  return box_open_easy(clen - 16U, m, pk, sk, n, c);
 }
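Usage sketch (not part of the patch): the easy secretbox API whose internals change above. The ciphertext buffer is the 16-byte Poly1305 tag followed by the Salsa20 ciphertext; the key is 32 bytes and the nonce 24 bytes. The parameter order of crypto_secretbox_open_easy is assumed from its body and is not confirmed by this hunk.

#include <stdint.h>
#include "Hacl_NaCl.h" /* assumed header name */

/* Seal msg into c (mlen + 16 bytes), then open it back in place. */
uint32_t secretbox_roundtrip(uint8_t *msg, uint32_t mlen,
                             uint8_t nonce[24], uint8_t key[32], uint8_t *c)
{
  (void)Hacl_NaCl_crypto_secretbox_easy(c, msg, mlen, nonce, key); /* returns 0 */
  /* Assumed order (m, c, clen, n, k); returns 0 on success,
     0xffffffffU if the Poly1305 tag does not verify. */
  return Hacl_NaCl_crypto_secretbox_open_easy(msg, c, mlen + 16U, nonce, key);
}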
 
diff --git a/src/msvc/Hacl_P256.c b/src/msvc/Hacl_P256.c
index 7e586e54..609fed81 100644
--- a/src/msvc/Hacl_P256.c
+++ b/src/msvc/Hacl_P256.c
@@ -33,11 +33,11 @@
 static inline uint64_t bn_is_zero_mask4(uint64_t *f)
 {
   uint64_t bn_zero[4U] = { 0U };
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(f[i], bn_zero[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
@@ -48,16 +48,16 @@ static inline uint64_t bn_is_zero_mask4(uint64_t *f)
 static inline bool bn_is_zero_vartime4(uint64_t *f)
 {
   uint64_t m = bn_is_zero_mask4(f);
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static inline uint64_t bn_is_eq_mask4(uint64_t *a, uint64_t *b)
 {
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t uu____0 = FStar_UInt64_eq_mask(a[i], b[i]);
     mask = uu____0 & mask;);
   uint64_t mask1 = mask;
@@ -67,16 +67,16 @@ static inline uint64_t bn_is_eq_mask4(uint64_t *a, uint64_t *b)
 static inline bool bn_is_eq_vartime4(uint64_t *a, uint64_t *b)
 {
   uint64_t m = bn_is_eq_mask4(a, b);
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static inline void bn_cmovznz4(uint64_t *res, uint64_t cin, uint64_t *x, uint64_t *y)
 {
-  uint64_t mask = ~FStar_UInt64_eq_mask(cin, (uint64_t)0U);
+  uint64_t mask = ~FStar_UInt64_eq_mask(cin, 0ULL);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t uu____0 = x[i];
     uint64_t x1 = uu____0 ^ (mask & (y[i] ^ uu____0));
@@ -85,52 +85,52 @@ static inline void bn_cmovznz4(uint64_t *res, uint64_t cin, uint64_t *x, uint64_
 
 static inline void bn_add_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t *y)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = x[4U * 0U];
+    uint64_t t20 = y[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = x[4U * 0U + 1U];
+    uint64_t t21 = y[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = x[4U * 0U + 2U];
+    uint64_t t22 = y[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = x[4U * 0U + 3U];
+    uint64_t t2 = y[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x1;);
@@ -138,23 +138,23 @@ static inline void bn_add_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t
 
 static inline uint64_t bn_sub4(uint64_t *res, uint64_t *x, uint64_t *y)
 {
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = x[4U * 0U];
+    uint64_t t20 = y[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = x[4U * 0U + 1U];
+    uint64_t t21 = y[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = x[4U * 0U + 2U];
+    uint64_t t22 = y[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = x[4U * 0U + 3U];
+    uint64_t t2 = y[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c0 = c;
@@ -163,53 +163,53 @@ static inline uint64_t bn_sub4(uint64_t *res, uint64_t *x, uint64_t *y)
 
 static inline void bn_sub_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t *y)
 {
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   {
-    uint64_t t1 = x[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = y[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = res + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = x[4U * 0U];
+    uint64_t t20 = y[4U * 0U];
+    uint64_t *res_i0 = res + 4U * 0U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t1, t20, res_i0);
-    uint64_t t10 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = x[4U * 0U + 1U];
+    uint64_t t21 = y[4U * 0U + 1U];
+    uint64_t *res_i1 = res + 4U * 0U + 1U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t10, t21, res_i1);
-    uint64_t t11 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = x[4U * 0U + 2U];
+    uint64_t t22 = y[4U * 0U + 2U];
+    uint64_t *res_i2 = res + 4U * 0U + 2U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t11, t22, res_i2);
-    uint64_t t12 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = y[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = res + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = x[4U * 0U + 3U];
+    uint64_t t2 = y[4U * 0U + 3U];
+    uint64_t *res_i = res + 4U * 0U + 3U;
     c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64(c0, t12, t2, res_i);
   }
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
-  KRML_HOST_IGNORE(c1);
-  uint64_t c2 = (uint64_t)0U - c00;
+  KRML_MAYBE_UNUSED_VAR(c1);
+  uint64_t c2 = 0ULL - c00;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & tmp[i]) | (~c2 & res[i]);
     os[i] = x1;);
@@ -217,59 +217,59 @@ static inline void bn_sub_mod4(uint64_t *res, uint64_t *n, uint64_t *x, uint64_t
 
 static inline void bn_mul4(uint64_t *res, uint64_t *x, uint64_t *y)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t bj = y[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = x[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = x[4U * 0U];
+      uint64_t *res_i0 = res_j + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, bj, c, res_i0);
-      uint64_t a_i0 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = x[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, bj, c, res_i1);
-      uint64_t a_i1 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = x[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, bj, c, res_i2);
-      uint64_t a_i2 = x[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = x[4U * 0U + 3U];
+      uint64_t *res_i = res_j + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, bj, c, res_i);
     }
     uint64_t r = c;
-    res[(uint32_t)4U + i0] = r;);
+    res[4U + i0] = r;);
 }
 
 static inline void bn_sqr4(uint64_t *res, uint64_t *x)
 {
-  memset(res, 0U, (uint32_t)8U * sizeof (uint64_t));
+  memset(res, 0U, 8U * sizeof (uint64_t));
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *ab = x;
     uint64_t a_j = x[i0];
     uint64_t *res_j = res + i0;
-    uint64_t c = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < i0 / (uint32_t)4U; i++)
+    uint64_t c = 0ULL;
+    for (uint32_t i = 0U; i < i0 / 4U; i++)
     {
-      uint64_t a_i = ab[(uint32_t)4U * i];
-      uint64_t *res_i0 = res_j + (uint32_t)4U * i;
+      uint64_t a_i = ab[4U * i];
+      uint64_t *res_i0 = res_j + 4U * i;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, a_j, c, res_i0);
-      uint64_t a_i0 = ab[(uint32_t)4U * i + (uint32_t)1U];
-      uint64_t *res_i1 = res_j + (uint32_t)4U * i + (uint32_t)1U;
+      uint64_t a_i0 = ab[4U * i + 1U];
+      uint64_t *res_i1 = res_j + 4U * i + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, a_j, c, res_i1);
-      uint64_t a_i1 = ab[(uint32_t)4U * i + (uint32_t)2U];
-      uint64_t *res_i2 = res_j + (uint32_t)4U * i + (uint32_t)2U;
+      uint64_t a_i1 = ab[4U * i + 2U];
+      uint64_t *res_i2 = res_j + 4U * i + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, a_j, c, res_i2);
-      uint64_t a_i2 = ab[(uint32_t)4U * i + (uint32_t)3U];
-      uint64_t *res_i = res_j + (uint32_t)4U * i + (uint32_t)3U;
+      uint64_t a_i2 = ab[4U * i + 3U];
+      uint64_t *res_i = res_j + 4U * i + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, a_j, c, res_i);
     }
-    for (uint32_t i = i0 / (uint32_t)4U * (uint32_t)4U; i < i0; i++)
+    for (uint32_t i = i0 / 4U * 4U; i < i0; i++)
     {
       uint64_t a_i = ab[i];
       uint64_t *res_i = res_j + i;
@@ -277,41 +277,37 @@ static inline void bn_sqr4(uint64_t *res, uint64_t *x)
     }
     uint64_t r = c;
     res[i0 + i0] = r;);
-  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, res, res);
-  KRML_HOST_IGNORE(c0);
+  uint64_t c0 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, res, res);
+  KRML_MAYBE_UNUSED_VAR(c0);
   uint64_t tmp[8U] = { 0U };
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     FStar_UInt128_uint128 res1 = FStar_UInt128_mul_wide(x[i], x[i]);
-    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, (uint32_t)64U));
+    uint64_t hi = FStar_UInt128_uint128_to_uint64(FStar_UInt128_shift_right(res1, 64U));
     uint64_t lo = FStar_UInt128_uint128_to_uint64(res1);
-    tmp[(uint32_t)2U * i] = lo;
-    tmp[(uint32_t)2U * i + (uint32_t)1U] = hi;);
-  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64((uint32_t)8U, res, tmp, res);
-  KRML_HOST_IGNORE(c1);
+    tmp[2U * i] = lo;
+    tmp[2U * i + 1U] = hi;);
+  uint64_t c1 = Hacl_Bignum_Addition_bn_add_eq_len_u64(8U, res, tmp, res);
+  KRML_MAYBE_UNUSED_VAR(c1);
 }
 
 static inline void bn_to_bytes_be4(uint8_t *res, uint64_t *f)
 {
   uint8_t tmp[32U] = { 0U };
-  KRML_HOST_IGNORE(tmp);
-  KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    store64_be(res + i * (uint32_t)8U, f[(uint32_t)4U - i - (uint32_t)1U]););
+  KRML_MAYBE_UNUSED_VAR(tmp);
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, store64_be(res + i * 8U, f[4U - i - 1U]););
 }
 
 static inline void bn_from_bytes_be4(uint64_t *res, uint8_t *b)
 {
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
-    uint64_t u = load64_be(b + ((uint32_t)4U - i - (uint32_t)1U) * (uint32_t)8U);
+    uint64_t u = load64_be(b + (4U - i - 1U) * 8U);
     uint64_t x = u;
     os[i] = x;);
 }
@@ -319,79 +315,79 @@ static inline void bn_from_bytes_be4(uint64_t *res, uint8_t *b)
 static inline void bn2_to_bytes_be4(uint8_t *res, uint64_t *x, uint64_t *y)
 {
   bn_to_bytes_be4(res, x);
-  bn_to_bytes_be4(res + (uint32_t)32U, y);
+  bn_to_bytes_be4(res + 32U, y);
 }
 
 static inline void make_prime(uint64_t *n)
 {
-  n[0U] = (uint64_t)0xffffffffffffffffU;
-  n[1U] = (uint64_t)0xffffffffU;
-  n[2U] = (uint64_t)0x0U;
-  n[3U] = (uint64_t)0xffffffff00000001U;
+  n[0U] = 0xffffffffffffffffULL;
+  n[1U] = 0xffffffffULL;
+  n[2U] = 0x0ULL;
+  n[3U] = 0xffffffff00000001ULL;
 }
 
 static inline void make_order(uint64_t *n)
 {
-  n[0U] = (uint64_t)0xf3b9cac2fc632551U;
-  n[1U] = (uint64_t)0xbce6faada7179e84U;
-  n[2U] = (uint64_t)0xffffffffffffffffU;
-  n[3U] = (uint64_t)0xffffffff00000000U;
+  n[0U] = 0xf3b9cac2fc632551ULL;
+  n[1U] = 0xbce6faada7179e84ULL;
+  n[2U] = 0xffffffffffffffffULL;
+  n[3U] = 0xffffffff00000000ULL;
 }
 
 static inline void make_a_coeff(uint64_t *a)
 {
-  a[0U] = (uint64_t)0xfffffffffffffffcU;
-  a[1U] = (uint64_t)0x3ffffffffU;
-  a[2U] = (uint64_t)0x0U;
-  a[3U] = (uint64_t)0xfffffffc00000004U;
+  a[0U] = 0xfffffffffffffffcULL;
+  a[1U] = 0x3ffffffffULL;
+  a[2U] = 0x0ULL;
+  a[3U] = 0xfffffffc00000004ULL;
 }
 
 static inline void make_b_coeff(uint64_t *b)
 {
-  b[0U] = (uint64_t)0xd89cdf6229c4bddfU;
-  b[1U] = (uint64_t)0xacf005cd78843090U;
-  b[2U] = (uint64_t)0xe5a220abf7212ed6U;
-  b[3U] = (uint64_t)0xdc30061d04874834U;
+  b[0U] = 0xd89cdf6229c4bddfULL;
+  b[1U] = 0xacf005cd78843090ULL;
+  b[2U] = 0xe5a220abf7212ed6ULL;
+  b[3U] = 0xdc30061d04874834ULL;
 }
 
 static inline void make_g_x(uint64_t *n)
 {
-  n[0U] = (uint64_t)0x79e730d418a9143cU;
-  n[1U] = (uint64_t)0x75ba95fc5fedb601U;
-  n[2U] = (uint64_t)0x79fb732b77622510U;
-  n[3U] = (uint64_t)0x18905f76a53755c6U;
+  n[0U] = 0x79e730d418a9143cULL;
+  n[1U] = 0x75ba95fc5fedb601ULL;
+  n[2U] = 0x79fb732b77622510ULL;
+  n[3U] = 0x18905f76a53755c6ULL;
 }
 
 static inline void make_g_y(uint64_t *n)
 {
-  n[0U] = (uint64_t)0xddf25357ce95560aU;
-  n[1U] = (uint64_t)0x8b4ab8e4ba19e45cU;
-  n[2U] = (uint64_t)0xd2e88688dd21f325U;
-  n[3U] = (uint64_t)0x8571ff1825885d85U;
+  n[0U] = 0xddf25357ce95560aULL;
+  n[1U] = 0x8b4ab8e4ba19e45cULL;
+  n[2U] = 0xd2e88688dd21f325ULL;
+  n[3U] = 0x8571ff1825885d85ULL;
 }
 
 static inline void make_fmont_R2(uint64_t *n)
 {
-  n[0U] = (uint64_t)0x3U;
-  n[1U] = (uint64_t)0xfffffffbffffffffU;
-  n[2U] = (uint64_t)0xfffffffffffffffeU;
-  n[3U] = (uint64_t)0x4fffffffdU;
+  n[0U] = 0x3ULL;
+  n[1U] = 0xfffffffbffffffffULL;
+  n[2U] = 0xfffffffffffffffeULL;
+  n[3U] = 0x4fffffffdULL;
 }
 
 static inline void make_fzero(uint64_t *n)
 {
-  n[0U] = (uint64_t)0U;
-  n[1U] = (uint64_t)0U;
-  n[2U] = (uint64_t)0U;
-  n[3U] = (uint64_t)0U;
+  n[0U] = 0ULL;
+  n[1U] = 0ULL;
+  n[2U] = 0ULL;
+  n[3U] = 0ULL;
 }
 
 static inline void make_fone(uint64_t *n)
 {
-  n[0U] = (uint64_t)0x1U;
-  n[1U] = (uint64_t)0xffffffff00000000U;
-  n[2U] = (uint64_t)0xffffffffffffffffU;
-  n[3U] = (uint64_t)0xfffffffeU;
+  n[0U] = 0x1ULL;
+  n[1U] = 0xffffffff00000000ULL;
+  n[2U] = 0xffffffffffffffffULL;
+  n[3U] = 0xfffffffeULL;
 }
 
 static inline uint64_t bn_is_lt_prime_mask4(uint64_t *f)
@@ -399,7 +395,7 @@ static inline uint64_t bn_is_lt_prime_mask4(uint64_t *f)
   uint64_t tmp[4U] = { 0U };
   make_prime(tmp);
   uint64_t c = bn_sub4(tmp, f, tmp);
-  return (uint64_t)0U - c;
+  return 0ULL - c;
 }
 
 static inline uint64_t feq_mask(uint64_t *a, uint64_t *b)
@@ -435,61 +431,61 @@ static inline void mont_reduction(uint64_t *res, uint64_t *x)
 {
   uint64_t n[4U] = { 0U };
   make_prime(n);
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t qj = (uint64_t)1U * x[i0];
+    0U,
+    4U,
+    1U,
+    uint64_t qj = 1ULL * x[i0];
     uint64_t *res_j0 = x + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
     }
     uint64_t r = c;
     uint64_t c1 = r;
-    uint64_t *resb = x + (uint32_t)4U + i0;
-    uint64_t res_j = x[(uint32_t)4U + i0];
+    uint64_t *resb = x + 4U + i0;
+    uint64_t res_j = x[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););
-  memcpy(res, x + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, x + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x1;);
@@ -512,7 +508,7 @@ static inline void fsqr0(uint64_t *res, uint64_t *x)
 static inline void from_mont(uint64_t *res, uint64_t *a)
 {
   uint64_t tmp[8U] = { 0U };
-  memcpy(tmp, a, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, a, 4U * sizeof (uint64_t));
   mont_reduction(res, tmp);
 }
 
@@ -540,105 +536,105 @@ static inline void finv(uint64_t *res, uint64_t *a)
 {
   uint64_t tmp[16U] = { 0U };
   uint64_t *x30 = tmp;
-  uint64_t *x2 = tmp + (uint32_t)4U;
-  uint64_t *tmp1 = tmp + (uint32_t)8U;
-  uint64_t *tmp2 = tmp + (uint32_t)12U;
-  memcpy(x2, a, (uint32_t)4U * sizeof (uint64_t));
+  uint64_t *x2 = tmp + 4U;
+  uint64_t *tmp1 = tmp + 8U;
+  uint64_t *tmp2 = tmp + 12U;
+  memcpy(x2, a, 4U * sizeof (uint64_t));
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, a);
-  memcpy(x30, x2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x30, x2, 4U * sizeof (uint64_t));
   {
     fsqr0(x30, x30);
   }
   fmul0(x30, x30, a);
-  memcpy(tmp1, x30, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, x30, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, x30);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, fsqr0(tmp2, tmp2););
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, fsqr0(tmp2, tmp2););
   fmul0(tmp2, tmp2, tmp1);
-  memcpy(tmp1, tmp2, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, tmp2, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, x30);
-  memcpy(x30, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR15(i, (uint32_t)0U, (uint32_t)15U, (uint32_t)1U, fsqr0(x30, x30););
+  memcpy(x30, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR15(i, 0U, 15U, 1U, fsqr0(x30, x30););
   fmul0(x30, x30, tmp1);
-  memcpy(tmp1, x30, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, x30, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, x2);
-  memcpy(x2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  memcpy(x2, tmp1, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, a);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)128U; i++)
+  for (uint32_t i = 0U; i < 128U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, tmp1);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, tmp1);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)30U; i++)
+  for (uint32_t i = 0U; i < 30U; i++)
   {
     fsqr0(x2, x2);
   }
   fmul0(x2, x2, x30);
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(x2, x2););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(x2, x2););
   fmul0(tmp1, x2, a);
-  memcpy(res, tmp1, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, tmp1, 4U * sizeof (uint64_t));
 }
 
 static inline void fsqrt(uint64_t *res, uint64_t *a)
 {
   uint64_t tmp[8U] = { 0U };
   uint64_t *tmp1 = tmp;
-  uint64_t *tmp2 = tmp + (uint32_t)4U;
-  memcpy(tmp1, a, (uint32_t)4U * sizeof (uint64_t));
+  uint64_t *tmp2 = tmp + 4U;
+  memcpy(tmp1, a, 4U * sizeof (uint64_t));
   {
     fsqr0(tmp1, tmp1);
   }
   fmul0(tmp1, tmp1, a);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, fsqr0(tmp2, tmp2););
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, fsqr0(tmp2, tmp2););
   fmul0(tmp2, tmp2, tmp1);
-  memcpy(tmp1, tmp2, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, tmp2, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, tmp2);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, fsqr0(tmp2, tmp2););
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, fsqr0(tmp2, tmp2););
   fmul0(tmp2, tmp2, tmp1);
-  memcpy(tmp1, tmp2, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR16(i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, fsqr0(tmp1, tmp1););
+  memcpy(tmp1, tmp2, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, fsqr0(tmp1, tmp1););
   fmul0(tmp1, tmp1, tmp2);
-  memcpy(tmp2, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  memcpy(tmp2, tmp1, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     fsqr0(tmp2, tmp2);
   }
   fmul0(tmp2, tmp2, a);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)96U; i++)
+  for (uint32_t i = 0U; i < 96U; i++)
   {
     fsqr0(tmp2, tmp2);
   }
   fmul0(tmp2, tmp2, a);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)94U; i++)
+  for (uint32_t i = 0U; i < 94U; i++)
   {
     fsqr0(tmp2, tmp2);
   }
-  memcpy(res, tmp2, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, tmp2, 4U * sizeof (uint64_t));
 }
 
 static inline void make_base_point(uint64_t *p)
 {
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)4U;
-  uint64_t *z = p + (uint32_t)8U;
+  uint64_t *y = p + 4U;
+  uint64_t *z = p + 8U;
   make_g_x(x);
   make_g_y(y);
   make_fone(z);
@@ -647,8 +643,8 @@ static inline void make_base_point(uint64_t *p)
 static inline void make_point_at_inf(uint64_t *p)
 {
   uint64_t *x = p;
-  uint64_t *y = p + (uint32_t)4U;
-  uint64_t *z = p + (uint32_t)8U;
+  uint64_t *y = p + 4U;
+  uint64_t *z = p + 8U;
   make_fzero(x);
   make_fone(y);
   make_fzero(z);
@@ -656,7 +652,7 @@ static inline void make_point_at_inf(uint64_t *p)
 
 static inline bool is_point_at_inf_vartime(uint64_t *p)
 {
-  uint64_t *pz = p + (uint32_t)8U;
+  uint64_t *pz = p + 8U;
   return bn_is_zero_vartime4(pz);
 }
 
@@ -664,10 +660,10 @@ static inline void to_aff_point(uint64_t *res, uint64_t *p)
 {
   uint64_t zinv[4U] = { 0U };
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
-  uint64_t *pz = p + (uint32_t)8U;
+  uint64_t *py = p + 4U;
+  uint64_t *pz = p + 8U;
   uint64_t *x = res;
-  uint64_t *y = res + (uint32_t)4U;
+  uint64_t *y = res + 4U;
   finv(zinv, pz);
   fmul0(x, px, zinv);
   fmul0(y, py, zinv);
@@ -679,7 +675,7 @@ static inline void to_aff_point_x(uint64_t *res, uint64_t *p)
 {
   uint64_t zinv[4U] = { 0U };
   uint64_t *px = p;
-  uint64_t *pz = p + (uint32_t)8U;
+  uint64_t *pz = p + 8U;
   finv(zinv, pz);
   fmul0(res, px, zinv);
   from_mont(res, res);
@@ -688,10 +684,10 @@ static inline void to_aff_point_x(uint64_t *res, uint64_t *p)
 static inline void to_proj_point(uint64_t *res, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   uint64_t *rx = res;
-  uint64_t *ry = res + (uint32_t)4U;
-  uint64_t *rz = res + (uint32_t)8U;
+  uint64_t *ry = res + 4U;
+  uint64_t *rz = res + 8U;
   to_mont(rx, px);
   to_mont(ry, py);
   make_fone(rz);
@@ -703,7 +699,7 @@ static inline bool is_on_curve_vartime(uint64_t *p)
   uint64_t tx[4U] = { 0U };
   uint64_t ty[4U] = { 0U };
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   to_mont(tx, px);
   to_mont(ty, py);
   uint64_t tmp[4U] = { 0U };
@@ -715,14 +711,14 @@ static inline bool is_on_curve_vartime(uint64_t *p)
   fadd0(rp, tmp, rp);
   fsqr0(ty, ty);
   uint64_t r = feq_mask(ty, rp);
-  bool r0 = r == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool r0 = r == 0xFFFFFFFFFFFFFFFFULL;
   return r0;
 }
 
 static inline void aff_point_store(uint8_t *res, uint64_t *p)
 {
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   bn2_to_bytes_be4(res, px, py);
 }
 
@@ -736,17 +732,17 @@ static inline void point_store(uint8_t *res, uint64_t *p)
 static inline bool aff_point_load_vartime(uint64_t *p, uint8_t *b)
 {
   uint8_t *p_x = b;
-  uint8_t *p_y = b + (uint32_t)32U;
+  uint8_t *p_y = b + 32U;
   uint64_t *bn_p_x = p;
-  uint64_t *bn_p_y = p + (uint32_t)4U;
+  uint64_t *bn_p_y = p + 4U;
   bn_from_bytes_be4(bn_p_x, p_x);
   bn_from_bytes_be4(bn_p_y, p_y);
   uint64_t *px = p;
-  uint64_t *py = p + (uint32_t)4U;
+  uint64_t *py = p + 4U;
   uint64_t lessX = bn_is_lt_prime_mask4(px);
   uint64_t lessY = bn_is_lt_prime_mask4(py);
   uint64_t res = lessX & lessY;
-  bool is_xy_valid = res == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool is_xy_valid = res == 0xFFFFFFFFFFFFFFFFULL;
   if (!is_xy_valid)
   {
     return false;
@@ -769,15 +765,15 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
 {
   uint8_t s0 = s[0U];
   uint8_t s01 = s0;
-  if (!(s01 == (uint8_t)0x02U || s01 == (uint8_t)0x03U))
+  if (!(s01 == 0x02U || s01 == 0x03U))
   {
     return false;
   }
-  uint8_t *xb = s + (uint32_t)1U;
+  uint8_t *xb = s + 1U;
   bn_from_bytes_be4(x, xb);
   uint64_t is_x_valid = bn_is_lt_prime_mask4(x);
-  bool is_x_valid1 = is_x_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  bool is_y_odd = s01 == (uint8_t)0x03U;
+  bool is_x_valid1 = is_x_valid == 0xFFFFFFFFFFFFFFFFULL;
+  bool is_y_odd = s01 == 0x03U;
   if (!is_x_valid1)
   {
     return false;
@@ -797,14 +793,14 @@ static inline bool aff_point_decompress_vartime(uint64_t *x, uint64_t *y, uint8_
   from_mont(y, yM);
   fsqr0(yM, yM);
   uint64_t r = feq_mask(yM, y2M);
-  bool is_y_valid = r == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool is_y_valid = r == 0xFFFFFFFFFFFFFFFFULL;
   bool is_y_valid0 = is_y_valid;
   if (!is_y_valid0)
   {
     return false;
   }
-  uint64_t is_y_odd1 = y[0U] & (uint64_t)1U;
-  bool is_y_odd2 = is_y_odd1 == (uint64_t)1U;
+  uint64_t is_y_odd1 = y[0U] & 1ULL;
+  bool is_y_odd2 = is_y_odd1 == 1ULL;
   fnegate_conditional_vartime(y, is_y_odd2 != is_y_odd);
   return true;
 }
@@ -813,18 +809,18 @@ static inline void point_double(uint64_t *res, uint64_t *p)
 {
   uint64_t tmp[20U] = { 0U };
   uint64_t *x = p;
-  uint64_t *z = p + (uint32_t)8U;
+  uint64_t *z = p + 8U;
   uint64_t *x3 = res;
-  uint64_t *y3 = res + (uint32_t)4U;
-  uint64_t *z3 = res + (uint32_t)8U;
+  uint64_t *y3 = res + 4U;
+  uint64_t *z3 = res + 8U;
   uint64_t *t0 = tmp;
-  uint64_t *t1 = tmp + (uint32_t)4U;
-  uint64_t *t2 = tmp + (uint32_t)8U;
-  uint64_t *t3 = tmp + (uint32_t)12U;
-  uint64_t *t4 = tmp + (uint32_t)16U;
+  uint64_t *t1 = tmp + 4U;
+  uint64_t *t2 = tmp + 8U;
+  uint64_t *t3 = tmp + 12U;
+  uint64_t *t4 = tmp + 16U;
   uint64_t *x1 = p;
-  uint64_t *y = p + (uint32_t)4U;
-  uint64_t *z1 = p + (uint32_t)8U;
+  uint64_t *y = p + 4U;
+  uint64_t *z1 = p + 8U;
   fsqr0(t0, x1);
   fsqr0(t1, y);
   fsqr0(t2, z1);
@@ -865,22 +861,22 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
 {
   uint64_t tmp[36U] = { 0U };
   uint64_t *t0 = tmp;
-  uint64_t *t1 = tmp + (uint32_t)24U;
+  uint64_t *t1 = tmp + 24U;
   uint64_t *x3 = t1;
-  uint64_t *y3 = t1 + (uint32_t)4U;
-  uint64_t *z3 = t1 + (uint32_t)8U;
+  uint64_t *y3 = t1 + 4U;
+  uint64_t *z3 = t1 + 8U;
   uint64_t *t01 = t0;
-  uint64_t *t11 = t0 + (uint32_t)4U;
-  uint64_t *t2 = t0 + (uint32_t)8U;
-  uint64_t *t3 = t0 + (uint32_t)12U;
-  uint64_t *t4 = t0 + (uint32_t)16U;
-  uint64_t *t5 = t0 + (uint32_t)20U;
+  uint64_t *t11 = t0 + 4U;
+  uint64_t *t2 = t0 + 8U;
+  uint64_t *t3 = t0 + 12U;
+  uint64_t *t4 = t0 + 16U;
+  uint64_t *t5 = t0 + 20U;
   uint64_t *x1 = p;
-  uint64_t *y1 = p + (uint32_t)4U;
-  uint64_t *z10 = p + (uint32_t)8U;
+  uint64_t *y1 = p + 4U;
+  uint64_t *z10 = p + 8U;
   uint64_t *x20 = q;
-  uint64_t *y20 = q + (uint32_t)4U;
-  uint64_t *z20 = q + (uint32_t)8U;
+  uint64_t *y20 = q + 4U;
+  uint64_t *z20 = q + 8U;
   fmul0(t01, x1, x20);
   fmul0(t11, y1, y20);
   fmul0(t2, z10, z20);
@@ -888,10 +884,10 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
   fadd0(t4, x20, y20);
   fmul0(t3, t3, t4);
   fadd0(t4, t01, t11);
-  uint64_t *y10 = p + (uint32_t)4U;
-  uint64_t *z11 = p + (uint32_t)8U;
-  uint64_t *y2 = q + (uint32_t)4U;
-  uint64_t *z21 = q + (uint32_t)8U;
+  uint64_t *y10 = p + 4U;
+  uint64_t *z11 = p + 8U;
+  uint64_t *y2 = q + 4U;
+  uint64_t *z21 = q + 8U;
   fsub0(t3, t3, t4);
   fadd0(t4, y10, z11);
   fadd0(t5, y2, z21);
@@ -899,9 +895,9 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
   fadd0(t5, t11, t2);
   fsub0(t4, t4, t5);
   uint64_t *x10 = p;
-  uint64_t *z1 = p + (uint32_t)8U;
+  uint64_t *z1 = p + 8U;
   uint64_t *x2 = q;
-  uint64_t *z2 = q + (uint32_t)8U;
+  uint64_t *z2 = q + 8U;
   fadd0(x3, x10, z1);
   fadd0(y3, x2, z2);
   fmul0(x3, x3, y3);
@@ -932,7 +928,7 @@ static inline void point_add(uint64_t *res, uint64_t *p, uint64_t *q)
   fmul0(z3, t4, z3);
   fmul0(t11, t3, t01);
   fadd0(z3, z3, t11);
-  memcpy(res, t1, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(res, t1, 12U * sizeof (uint64_t));
 }
 
 static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p)
@@ -940,41 +936,37 @@ static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p)
   uint64_t table[192U] = { 0U };
   uint64_t tmp[12U] = { 0U };
   uint64_t *t0 = table;
-  uint64_t *t1 = table + (uint32_t)12U;
+  uint64_t *t1 = table + 12U;
   make_point_at_inf(t0);
-  memcpy(t1, p, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(t1, p, 12U * sizeof (uint64_t));
   KRML_MAYBE_FOR7(i,
-    (uint32_t)0U,
-    (uint32_t)7U,
-    (uint32_t)1U,
-    uint64_t *t11 = table + (i + (uint32_t)1U) * (uint32_t)12U;
+    0U,
+    7U,
+    1U,
+    uint64_t *t11 = table + (i + 1U) * 12U;
     point_double(tmp, t11);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t));
-    uint64_t *t2 = table + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U;
+    memcpy(table + (2U * i + 2U) * 12U, tmp, 12U * sizeof (uint64_t));
+    uint64_t *t2 = table + (2U * i + 2U) * 12U;
     point_add(tmp, p, t2);
-    memcpy(table + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t)););
+    memcpy(table + (2U * i + 3U) * 12U, tmp, 12U * sizeof (uint64_t)););
   make_point_at_inf(res);
   uint64_t tmp0[12U] = { 0U };
-  for (uint32_t i0 = (uint32_t)0U; i0 < (uint32_t)64U; i0++)
+  for (uint32_t i0 = 0U; i0 < 64U; i0++)
   {
-    KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, point_double(res, res););
-    uint32_t k = (uint32_t)256U - (uint32_t)4U * i0 - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar, k, (uint32_t)4U);
-    memcpy(tmp0, (uint64_t *)table, (uint32_t)12U * sizeof (uint64_t));
+    KRML_MAYBE_FOR4(i, 0U, 4U, 1U, point_double(res, res););
+    uint32_t k = 256U - 4U * i0 - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar, k, 4U);
+    memcpy(tmp0, (uint64_t *)table, 12U * sizeof (uint64_t));
     KRML_MAYBE_FOR15(i1,
-      (uint32_t)0U,
-      (uint32_t)15U,
-      (uint32_t)1U,
-      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + (uint32_t)1U));
-      const uint64_t *res_j = table + (i1 + (uint32_t)1U) * (uint32_t)12U;
+      0U,
+      15U,
+      1U,
+      uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i1 + 1U));
+      const uint64_t *res_j = table + (i1 + 1U) * 12U;
       KRML_MAYBE_FOR12(i,
-        (uint32_t)0U,
-        (uint32_t)12U,
-        (uint32_t)1U,
+        0U,
+        12U,
+        1U,
         uint64_t *os = tmp0;
         uint64_t x = (c & res_j[i]) | (~c & tmp0[i]);
         os[i] = x;););
@@ -984,17 +976,17 @@ static inline void point_mul(uint64_t *res, uint64_t *scalar, uint64_t *p)
 
 static inline void precomp_get_consttime(const uint64_t *table, uint64_t bits_l, uint64_t *tmp)
 {
-  memcpy(tmp, (uint64_t *)table, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(tmp, (uint64_t *)table, 12U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i0,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + (uint32_t)1U));
-    const uint64_t *res_j = table + (i0 + (uint32_t)1U) * (uint32_t)12U;
+    0U,
+    15U,
+    1U,
+    uint64_t c = FStar_UInt64_eq_mask(bits_l, (uint64_t)(i0 + 1U));
+    const uint64_t *res_j = table + (i0 + 1U) * 12U;
     KRML_MAYBE_FOR12(i,
-      (uint32_t)0U,
-      (uint32_t)12U,
-      (uint32_t)1U,
+      0U,
+      12U,
+      1U,
       uint64_t *os = tmp;
       uint64_t x = (c & res_j[i]) | (~c & tmp[i]);
       os[i] = x;););
@@ -1007,64 +999,58 @@ static inline void point_mul_g(uint64_t *res, uint64_t *scalar)
   uint64_t
   q2[12U] =
     {
-      (uint64_t)1499621593102562565U, (uint64_t)16692369783039433128U,
-      (uint64_t)15337520135922861848U, (uint64_t)5455737214495366228U,
-      (uint64_t)17827017231032529600U, (uint64_t)12413621606240782649U,
-      (uint64_t)2290483008028286132U, (uint64_t)15752017553340844820U,
-      (uint64_t)4846430910634234874U, (uint64_t)10861682798464583253U,
-      (uint64_t)15404737222404363049U, (uint64_t)363586619281562022U
+      1499621593102562565ULL, 16692369783039433128ULL, 15337520135922861848ULL,
+      5455737214495366228ULL, 17827017231032529600ULL, 12413621606240782649ULL,
+      2290483008028286132ULL, 15752017553340844820ULL, 4846430910634234874ULL,
+      10861682798464583253ULL, 15404737222404363049ULL, 363586619281562022ULL
     };
   uint64_t
   q3[12U] =
     {
-      (uint64_t)14619254753077084366U, (uint64_t)13913835116514008593U,
-      (uint64_t)15060744674088488145U, (uint64_t)17668414598203068685U,
-      (uint64_t)10761169236902342334U, (uint64_t)15467027479157446221U,
-      (uint64_t)14989185522423469618U, (uint64_t)14354539272510107003U,
-      (uint64_t)14298211796392133693U, (uint64_t)13270323784253711450U,
-      (uint64_t)13380964971965046957U, (uint64_t)8686204248456909699U
+      14619254753077084366ULL, 13913835116514008593ULL, 15060744674088488145ULL,
+      17668414598203068685ULL, 10761169236902342334ULL, 15467027479157446221ULL,
+      14989185522423469618ULL, 14354539272510107003ULL, 14298211796392133693ULL,
+      13270323784253711450ULL, 13380964971965046957ULL, 8686204248456909699ULL
     };
   uint64_t
   q4[12U] =
     {
-      (uint64_t)7870395003430845958U, (uint64_t)18001862936410067720U,
-      (uint64_t)8006461232116967215U, (uint64_t)5921313779532424762U,
-      (uint64_t)10702113371959864307U, (uint64_t)8070517410642379879U,
-      (uint64_t)7139806720777708306U, (uint64_t)8253938546650739833U,
-      (uint64_t)17490482834545705718U, (uint64_t)1065249776797037500U,
-      (uint64_t)5018258455937968775U, (uint64_t)14100621120178668337U
+      7870395003430845958ULL, 18001862936410067720ULL, 8006461232116967215ULL,
+      5921313779532424762ULL, 10702113371959864307ULL, 8070517410642379879ULL,
+      7139806720777708306ULL, 8253938546650739833ULL, 17490482834545705718ULL,
+      1065249776797037500ULL, 5018258455937968775ULL, 14100621120178668337ULL
     };
   uint64_t *r1 = scalar;
-  uint64_t *r2 = scalar + (uint32_t)1U;
-  uint64_t *r3 = scalar + (uint32_t)2U;
-  uint64_t *r4 = scalar + (uint32_t)3U;
+  uint64_t *r2 = scalar + 1U;
+  uint64_t *r3 = scalar + 2U;
+  uint64_t *r4 = scalar + 3U;
   make_point_at_inf(res);
   uint64_t tmp[12U] = { 0U };
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    KRML_MAYBE_FOR4(i0, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, point_double(res, res););
-    uint32_t k = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r4, k, (uint32_t)4U);
+    0U,
+    16U,
+    1U,
+    KRML_MAYBE_FOR4(i0, 0U, 4U, 1U, point_double(res, res););
+    uint32_t k = 64U - 4U * i - 4U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r4, k, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_192_table_w4, bits_l, tmp);
     point_add(res, res, tmp);
-    uint32_t k0 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r3, k0, (uint32_t)4U);
+    uint32_t k0 = 64U - 4U * i - 4U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r3, k0, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_128_table_w4, bits_l0, tmp);
     point_add(res, res, tmp);
-    uint32_t k1 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r2, k1, (uint32_t)4U);
+    uint32_t k1 = 64U - 4U * i - 4U;
+    uint64_t bits_l1 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r2, k1, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_g_pow2_64_table_w4, bits_l1, tmp);
     point_add(res, res, tmp);
-    uint32_t k2 = (uint32_t)64U - (uint32_t)4U * i - (uint32_t)4U;
-    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)1U, r1, k2, (uint32_t)4U);
+    uint32_t k2 = 64U - 4U * i - 4U;
+    uint64_t bits_l2 = Hacl_Bignum_Lib_bn_get_bits_u64(1U, r1, k2, 4U);
     precomp_get_consttime(Hacl_P256_PrecompTable_precomp_basepoint_table_w4, bits_l2, tmp);
     point_add(res, res, tmp););
-  KRML_HOST_IGNORE(q1);
-  KRML_HOST_IGNORE(q2);
-  KRML_HOST_IGNORE(q3);
-  KRML_HOST_IGNORE(q4);
+  KRML_MAYBE_UNUSED_VAR(q1);
+  KRML_MAYBE_UNUSED_VAR(q2);
+  KRML_MAYBE_UNUSED_VAR(q3);
+  KRML_MAYBE_UNUSED_VAR(q4);
 }
 
 static inline void
@@ -1075,54 +1061,48 @@ point_mul_double_g(uint64_t *res, uint64_t *scalar1, uint64_t *scalar2, uint64_t
   uint64_t table2[384U] = { 0U };
   uint64_t tmp[12U] = { 0U };
   uint64_t *t0 = table2;
-  uint64_t *t1 = table2 + (uint32_t)12U;
+  uint64_t *t1 = table2 + 12U;
   make_point_at_inf(t0);
-  memcpy(t1, q2, (uint32_t)12U * sizeof (uint64_t));
+  memcpy(t1, q2, 12U * sizeof (uint64_t));
   KRML_MAYBE_FOR15(i,
-    (uint32_t)0U,
-    (uint32_t)15U,
-    (uint32_t)1U,
-    uint64_t *t11 = table2 + (i + (uint32_t)1U) * (uint32_t)12U;
+    0U,
+    15U,
+    1U,
+    uint64_t *t11 = table2 + (i + 1U) * 12U;
     point_double(tmp, t11);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t));
-    uint64_t *t2 = table2 + ((uint32_t)2U * i + (uint32_t)2U) * (uint32_t)12U;
+    memcpy(table2 + (2U * i + 2U) * 12U, tmp, 12U * sizeof (uint64_t));
+    uint64_t *t2 = table2 + (2U * i + 2U) * 12U;
     point_add(tmp, q2, t2);
-    memcpy(table2 + ((uint32_t)2U * i + (uint32_t)3U) * (uint32_t)12U,
-      tmp,
-      (uint32_t)12U * sizeof (uint64_t)););
+    memcpy(table2 + (2U * i + 3U) * 12U, tmp, 12U * sizeof (uint64_t)););
   uint64_t tmp0[12U] = { 0U };
-  uint32_t i0 = (uint32_t)255U;
-  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, i0, (uint32_t)5U);
+  uint32_t i0 = 255U;
+  uint64_t bits_c = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, i0, 5U);
   uint32_t bits_l32 = (uint32_t)bits_c;
-  const
-  uint64_t
-  *a_bits_l = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * (uint32_t)12U;
-  memcpy(res, (uint64_t *)a_bits_l, (uint32_t)12U * sizeof (uint64_t));
-  uint32_t i1 = (uint32_t)255U;
-  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, i1, (uint32_t)5U);
+  const uint64_t *a_bits_l = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l32 * 12U;
+  memcpy(res, (uint64_t *)a_bits_l, 12U * sizeof (uint64_t));
+  uint32_t i1 = 255U;
+  uint64_t bits_c0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, i1, 5U);
   uint32_t bits_l320 = (uint32_t)bits_c0;
-  const uint64_t *a_bits_l0 = table2 + bits_l320 * (uint32_t)12U;
-  memcpy(tmp0, (uint64_t *)a_bits_l0, (uint32_t)12U * sizeof (uint64_t));
+  const uint64_t *a_bits_l0 = table2 + bits_l320 * 12U;
+  memcpy(tmp0, (uint64_t *)a_bits_l0, 12U * sizeof (uint64_t));
   point_add(res, res, tmp0);
   uint64_t tmp1[12U] = { 0U };
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)51U; i++)
+  for (uint32_t i = 0U; i < 51U; i++)
   {
-    KRML_MAYBE_FOR5(i2, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, point_double(res, res););
-    uint32_t k = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar2, k, (uint32_t)5U);
+    KRML_MAYBE_FOR5(i2, 0U, 5U, 1U, point_double(res, res););
+    uint32_t k = 255U - 5U * i - 5U;
+    uint64_t bits_l = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar2, k, 5U);
     uint32_t bits_l321 = (uint32_t)bits_l;
-    const uint64_t *a_bits_l1 = table2 + bits_l321 * (uint32_t)12U;
-    memcpy(tmp1, (uint64_t *)a_bits_l1, (uint32_t)12U * sizeof (uint64_t));
+    const uint64_t *a_bits_l1 = table2 + bits_l321 * 12U;
+    memcpy(tmp1, (uint64_t *)a_bits_l1, 12U * sizeof (uint64_t));
     point_add(res, res, tmp1);
-    uint32_t k0 = (uint32_t)255U - (uint32_t)5U * i - (uint32_t)5U;
-    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64((uint32_t)4U, scalar1, k0, (uint32_t)5U);
+    uint32_t k0 = 255U - 5U * i - 5U;
+    uint64_t bits_l0 = Hacl_Bignum_Lib_bn_get_bits_u64(4U, scalar1, k0, 5U);
     uint32_t bits_l322 = (uint32_t)bits_l0;
     const
     uint64_t
-    *a_bits_l2 = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * (uint32_t)12U;
-    memcpy(tmp1, (uint64_t *)a_bits_l2, (uint32_t)12U * sizeof (uint64_t));
+    *a_bits_l2 = Hacl_P256_PrecompTable_precomp_basepoint_table_w5 + bits_l322 * 12U;
+    memcpy(tmp1, (uint64_t *)a_bits_l2, 12U * sizeof (uint64_t));
     point_add(res, res, tmp1);
   }
 }
@@ -1132,7 +1112,7 @@ static inline uint64_t bn_is_lt_order_mask4(uint64_t *f)
   uint64_t tmp[4U] = { 0U };
   make_order(tmp);
   uint64_t c = bn_sub4(tmp, f, tmp);
-  return (uint64_t)0U - c;
+  return 0ULL - c;
 }
 
 static inline uint64_t bn_is_lt_order_and_gt_zero_mask4(uint64_t *f)
@@ -1161,61 +1141,61 @@ static inline void qmont_reduction(uint64_t *res, uint64_t *x)
 {
   uint64_t n[4U] = { 0U };
   make_order(n);
-  uint64_t c0 = (uint64_t)0U;
+  uint64_t c0 = 0ULL;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
-    uint64_t qj = (uint64_t)0xccd1c8aaee00bc4fU * x[i0];
+    0U,
+    4U,
+    1U,
+    uint64_t qj = 0xccd1c8aaee00bc4fULL * x[i0];
     uint64_t *res_j0 = x + i0;
-    uint64_t c = (uint64_t)0U;
+    uint64_t c = 0ULL;
     {
-      uint64_t a_i = n[(uint32_t)4U * (uint32_t)0U];
-      uint64_t *res_i0 = res_j0 + (uint32_t)4U * (uint32_t)0U;
+      uint64_t a_i = n[4U * 0U];
+      uint64_t *res_i0 = res_j0 + 4U * 0U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i, qj, c, res_i0);
-      uint64_t a_i0 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-      uint64_t *res_i1 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+      uint64_t a_i0 = n[4U * 0U + 1U];
+      uint64_t *res_i1 = res_j0 + 4U * 0U + 1U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i0, qj, c, res_i1);
-      uint64_t a_i1 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-      uint64_t *res_i2 = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+      uint64_t a_i1 = n[4U * 0U + 2U];
+      uint64_t *res_i2 = res_j0 + 4U * 0U + 2U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i1, qj, c, res_i2);
-      uint64_t a_i2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-      uint64_t *res_i = res_j0 + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+      uint64_t a_i2 = n[4U * 0U + 3U];
+      uint64_t *res_i = res_j0 + 4U * 0U + 3U;
       c = Hacl_Bignum_Base_mul_wide_add2_u64(a_i2, qj, c, res_i);
     }
     uint64_t r = c;
     uint64_t c1 = r;
-    uint64_t *resb = x + (uint32_t)4U + i0;
-    uint64_t res_j = x[(uint32_t)4U + i0];
+    uint64_t *resb = x + 4U + i0;
+    uint64_t res_j = x[4U + i0];
     c0 = Lib_IntTypes_Intrinsics_add_carry_u64(c0, c1, res_j, resb););
-  memcpy(res, x + (uint32_t)4U, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(res, x + 4U, 4U * sizeof (uint64_t));
   uint64_t c00 = c0;
   uint64_t tmp[4U] = { 0U };
-  uint64_t c = (uint64_t)0U;
+  uint64_t c = 0ULL;
   {
-    uint64_t t1 = res[(uint32_t)4U * (uint32_t)0U];
-    uint64_t t20 = n[(uint32_t)4U * (uint32_t)0U];
-    uint64_t *res_i0 = tmp + (uint32_t)4U * (uint32_t)0U;
+    uint64_t t1 = res[4U * 0U];
+    uint64_t t20 = n[4U * 0U];
+    uint64_t *res_i0 = tmp + 4U * 0U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, t20, res_i0);
-    uint64_t t10 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t t21 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)1U];
-    uint64_t *res_i1 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)1U;
+    uint64_t t10 = res[4U * 0U + 1U];
+    uint64_t t21 = n[4U * 0U + 1U];
+    uint64_t *res_i1 = tmp + 4U * 0U + 1U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t10, t21, res_i1);
-    uint64_t t11 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t t22 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)2U];
-    uint64_t *res_i2 = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)2U;
+    uint64_t t11 = res[4U * 0U + 2U];
+    uint64_t t22 = n[4U * 0U + 2U];
+    uint64_t *res_i2 = tmp + 4U * 0U + 2U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t11, t22, res_i2);
-    uint64_t t12 = res[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t t2 = n[(uint32_t)4U * (uint32_t)0U + (uint32_t)3U];
-    uint64_t *res_i = tmp + (uint32_t)4U * (uint32_t)0U + (uint32_t)3U;
+    uint64_t t12 = res[4U * 0U + 3U];
+    uint64_t t2 = n[4U * 0U + 3U];
+    uint64_t *res_i = tmp + 4U * 0U + 3U;
     c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t12, t2, res_i);
   }
   uint64_t c1 = c;
   uint64_t c2 = c00 - c1;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = res;
     uint64_t x1 = (c2 & res[i]) | (~c2 & tmp[i]);
     os[i] = x1;);
@@ -1224,7 +1204,7 @@ static inline void qmont_reduction(uint64_t *res, uint64_t *x)
 static inline void from_qmont(uint64_t *res, uint64_t *x)
 {
   uint64_t tmp[8U] = { 0U };
-  memcpy(tmp, x, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(tmp, x, 4U * sizeof (uint64_t));
   qmont_reduction(res, tmp);
 }
 
@@ -1246,18 +1226,18 @@ bool Hacl_Impl_P256_DH_ecp256dh_i(uint8_t *public_key, uint8_t *private_key)
 {
   uint64_t tmp[16U] = { 0U };
   uint64_t *sk = tmp;
-  uint64_t *pk = tmp + (uint32_t)4U;
+  uint64_t *pk = tmp + 4U;
   bn_from_bytes_be4(sk, private_key);
   uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(sk);
   uint64_t oneq[4U] = { 0U };
-  oneq[0U] = (uint64_t)1U;
-  oneq[1U] = (uint64_t)0U;
-  oneq[2U] = (uint64_t)0U;
-  oneq[3U] = (uint64_t)0U;
+  oneq[0U] = 1ULL;
+  oneq[1U] = 0ULL;
+  oneq[2U] = 0ULL;
+  oneq[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -1265,7 +1245,7 @@ bool Hacl_Impl_P256_DH_ecp256dh_i(uint8_t *public_key, uint8_t *private_key)
   uint64_t is_sk_valid = is_b_valid;
   point_mul_g(pk, sk);
   point_store(public_key, pk);
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 bool
@@ -1277,19 +1257,19 @@ Hacl_Impl_P256_DH_ecp256dh_r(
 {
   uint64_t tmp[16U] = { 0U };
   uint64_t *sk = tmp;
-  uint64_t *pk = tmp + (uint32_t)4U;
+  uint64_t *pk = tmp + 4U;
   bool is_pk_valid = load_point_vartime(pk, their_pubkey);
   bn_from_bytes_be4(sk, private_key);
   uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(sk);
   uint64_t oneq[4U] = { 0U };
-  oneq[0U] = (uint64_t)1U;
-  oneq[1U] = (uint64_t)0U;
-  oneq[2U] = (uint64_t)0U;
-  oneq[3U] = (uint64_t)0U;
+  oneq[0U] = 1ULL;
+  oneq[1U] = 0ULL;
+  oneq[2U] = 0ULL;
+  oneq[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = sk;
     uint64_t uu____0 = oneq[i];
     uint64_t x = uu____0 ^ (is_b_valid & (sk[i] ^ uu____0));
@@ -1301,27 +1281,27 @@ Hacl_Impl_P256_DH_ecp256dh_r(
     point_mul(ss_proj, sk, pk);
     point_store(shared_secret, ss_proj);
   }
-  return is_sk_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU && is_pk_valid;
+  return is_sk_valid == 0xFFFFFFFFFFFFFFFFULL && is_pk_valid;
 }
 
 static inline void qinv(uint64_t *res, uint64_t *r)
 {
   uint64_t tmp[28U] = { 0U };
   uint64_t *x6 = tmp;
-  uint64_t *x_11 = tmp + (uint32_t)4U;
-  uint64_t *x_101 = tmp + (uint32_t)8U;
-  uint64_t *x_111 = tmp + (uint32_t)12U;
-  uint64_t *x_1111 = tmp + (uint32_t)16U;
-  uint64_t *x_10101 = tmp + (uint32_t)20U;
-  uint64_t *x_101111 = tmp + (uint32_t)24U;
-  memcpy(x6, r, (uint32_t)4U * sizeof (uint64_t));
+  uint64_t *x_11 = tmp + 4U;
+  uint64_t *x_101 = tmp + 8U;
+  uint64_t *x_111 = tmp + 12U;
+  uint64_t *x_1111 = tmp + 16U;
+  uint64_t *x_10101 = tmp + 20U;
+  uint64_t *x_101111 = tmp + 24U;
+  memcpy(x6, r, 4U * sizeof (uint64_t));
   {
     qsqr(x6, x6);
   }
   qmul(x_11, x6, r);
   qmul(x_101, x6, x_11);
   qmul(x_111, x6, x_101);
-  memcpy(x6, x_101, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x6, x_101, 4U * sizeof (uint64_t));
   {
     qsqr(x6, x6);
   }
@@ -1330,86 +1310,86 @@ static inline void qinv(uint64_t *res, uint64_t *r)
     qsqr(x6, x6);
   }
   qmul(x_10101, x6, r);
-  memcpy(x6, x_10101, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x6, x_10101, 4U * sizeof (uint64_t));
   {
     qsqr(x6, x6);
   }
   qmul(x_101111, x_101, x6);
   qmul(x6, x_10101, x6);
   uint64_t tmp1[4U] = { 0U };
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(x6, x6););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(x6, x6););
   qmul(x6, x6, x_11);
-  memcpy(tmp1, x6, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR8(i, (uint32_t)0U, (uint32_t)8U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  memcpy(tmp1, x6, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x6);
-  memcpy(x6, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  KRML_MAYBE_FOR16(i, (uint32_t)0U, (uint32_t)16U, (uint32_t)1U, qsqr(x6, x6););
+  memcpy(x6, tmp1, 4U * sizeof (uint64_t));
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, qsqr(x6, x6););
   qmul(x6, x6, tmp1);
-  memcpy(tmp1, x6, (uint32_t)4U * sizeof (uint64_t));
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)64U; i++)
+  memcpy(tmp1, x6, 4U * sizeof (uint64_t));
+  for (uint32_t i = 0U; i < 64U; i++)
   {
     qsqr(tmp1, tmp1);
   }
   qmul(tmp1, tmp1, x6);
-  for (uint32_t i = (uint32_t)0U; i < (uint32_t)32U; i++)
+  for (uint32_t i = 0U; i < 32U; i++)
   {
     qsqr(tmp1, tmp1);
   }
   qmul(tmp1, tmp1, x6);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_10101);
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR9(i, (uint32_t)0U, (uint32_t)9U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR9(i, 0U, 9U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101111);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, r);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, r);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR4(i, 0U, 4U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_111);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR10(i, (uint32_t)0U, (uint32_t)10U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR10(i, 0U, 10U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_101111);
-  KRML_MAYBE_FOR2(i, (uint32_t)0U, (uint32_t)2U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR2(i, 0U, 2U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR5(i, (uint32_t)0U, (uint32_t)5U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR5(i, 0U, 5U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_11);
-  KRML_MAYBE_FOR3(i, (uint32_t)0U, (uint32_t)3U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR3(i, 0U, 3U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, r);
-  KRML_MAYBE_FOR7(i, (uint32_t)0U, (uint32_t)7U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR7(i, 0U, 7U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_10101);
-  KRML_MAYBE_FOR6(i, (uint32_t)0U, (uint32_t)6U, (uint32_t)1U, qsqr(tmp1, tmp1););
+  KRML_MAYBE_FOR6(i, 0U, 6U, 1U, qsqr(tmp1, tmp1););
   qmul(tmp1, tmp1, x_1111);
-  memcpy(x6, tmp1, (uint32_t)4U * sizeof (uint64_t));
-  memcpy(res, x6, (uint32_t)4U * sizeof (uint64_t));
+  memcpy(x6, tmp1, 4U * sizeof (uint64_t));
+  memcpy(res, x6, 4U * sizeof (uint64_t));
 }
 
 static inline void qmul_mont(uint64_t *sinv, uint64_t *b, uint64_t *res)
@@ -1429,20 +1409,16 @@ ecdsa_verify_msg_as_qelem(
 {
   uint64_t tmp[28U] = { 0U };
   uint64_t *pk = tmp;
-  uint64_t *r_q = tmp + (uint32_t)12U;
-  uint64_t *s_q = tmp + (uint32_t)16U;
-  uint64_t *u1 = tmp + (uint32_t)20U;
-  uint64_t *u2 = tmp + (uint32_t)24U;
+  uint64_t *r_q = tmp + 12U;
+  uint64_t *s_q = tmp + 16U;
+  uint64_t *u1 = tmp + 20U;
+  uint64_t *u2 = tmp + 24U;
   bool is_pk_valid = load_point_vartime(pk, public_key);
   bn_from_bytes_be4(r_q, signature_r);
   bn_from_bytes_be4(s_q, signature_s);
   uint64_t is_r_valid = bn_is_lt_order_and_gt_zero_mask4(r_q);
   uint64_t is_s_valid = bn_is_lt_order_and_gt_zero_mask4(s_q);
-  bool
-  is_rs_valid =
-    is_r_valid
-    == (uint64_t)0xFFFFFFFFFFFFFFFFU
-    && is_s_valid == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool is_rs_valid = is_r_valid == 0xFFFFFFFFFFFFFFFFULL && is_s_valid == 0xFFFFFFFFFFFFFFFFULL;
   if (!(is_pk_valid && is_rs_valid))
   {
     return false;
@@ -1474,20 +1450,20 @@ ecdsa_sign_msg_as_qelem(
 {
   uint64_t rsdk_q[16U] = { 0U };
   uint64_t *r_q = rsdk_q;
-  uint64_t *s_q = rsdk_q + (uint32_t)4U;
-  uint64_t *d_a = rsdk_q + (uint32_t)8U;
-  uint64_t *k_q = rsdk_q + (uint32_t)12U;
+  uint64_t *s_q = rsdk_q + 4U;
+  uint64_t *d_a = rsdk_q + 8U;
+  uint64_t *k_q = rsdk_q + 12U;
   bn_from_bytes_be4(d_a, private_key);
   uint64_t is_b_valid0 = bn_is_lt_order_and_gt_zero_mask4(d_a);
   uint64_t oneq0[4U] = { 0U };
-  oneq0[0U] = (uint64_t)1U;
-  oneq0[1U] = (uint64_t)0U;
-  oneq0[2U] = (uint64_t)0U;
-  oneq0[3U] = (uint64_t)0U;
+  oneq0[0U] = 1ULL;
+  oneq0[1U] = 0ULL;
+  oneq0[2U] = 0ULL;
+  oneq0[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = d_a;
     uint64_t uu____0 = oneq0[i];
     uint64_t x = uu____0 ^ (is_b_valid0 & (d_a[i] ^ uu____0));
@@ -1496,14 +1472,14 @@ ecdsa_sign_msg_as_qelem(
   bn_from_bytes_be4(k_q, nonce);
   uint64_t is_b_valid = bn_is_lt_order_and_gt_zero_mask4(k_q);
   uint64_t oneq[4U] = { 0U };
-  oneq[0U] = (uint64_t)1U;
-  oneq[1U] = (uint64_t)0U;
-  oneq[2U] = (uint64_t)0U;
-  oneq[3U] = (uint64_t)0U;
+  oneq[0U] = 1ULL;
+  oneq[1U] = 0ULL;
+  oneq[2U] = 0ULL;
+  oneq[3U] = 0ULL;
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint64_t *os = k_q;
     uint64_t uu____1 = oneq[i];
     uint64_t x = uu____1 ^ (is_b_valid & (k_q[i] ^ uu____1));
@@ -1524,7 +1500,7 @@ ecdsa_sign_msg_as_qelem(
   uint64_t is_r_zero = bn_is_zero_mask4(r_q);
   uint64_t is_s_zero = bn_is_zero_mask4(s_q);
   uint64_t m = are_sk_nonce_valid & (~is_r_zero & ~is_s_zero);
-  bool res = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool res = m == 0xFFFFFFFFFFFFFFFFULL;
   return res;
 }
 
@@ -1571,8 +1547,8 @@ Hacl_P256_ecdsa_sign_p256_sha2(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
-  Hacl_Streaming_SHA2_hash_256(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  Hacl_Hash_SHA2_hash_256(mHash, msg, msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1604,8 +1580,8 @@ Hacl_P256_ecdsa_sign_p256_sha384(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[48U] = { 0U };
-  Hacl_Streaming_SHA2_hash_384(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  Hacl_Hash_SHA2_hash_384(mHash, msg, msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1637,8 +1613,8 @@ Hacl_P256_ecdsa_sign_p256_sha512(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[64U] = { 0U };
-  Hacl_Streaming_SHA2_hash_512(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  Hacl_Hash_SHA2_hash_512(mHash, msg, msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1680,8 +1656,8 @@ Hacl_P256_ecdsa_sign_p256_without_hash(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
-  memcpy(mHash, msg, (uint32_t)32U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(msg_len);
+  memcpy(mHash, msg, 32U * sizeof (uint8_t));
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1716,8 +1692,8 @@ Hacl_P256_ecdsa_verif_p256_sha2(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
-  Hacl_Streaming_SHA2_hash_256(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  Hacl_Hash_SHA2_hash_256(mHash, msg, msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1747,8 +1723,8 @@ Hacl_P256_ecdsa_verif_p256_sha384(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[48U] = { 0U };
-  Hacl_Streaming_SHA2_hash_384(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  Hacl_Hash_SHA2_hash_384(mHash, msg, msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1778,8 +1754,8 @@ Hacl_P256_ecdsa_verif_p256_sha512(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[64U] = { 0U };
-  Hacl_Streaming_SHA2_hash_512(msg, msg_len, mHash);
-  KRML_HOST_IGNORE(msg_len);
+  Hacl_Hash_SHA2_hash_512(mHash, msg, msg_len);
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1814,8 +1790,8 @@ Hacl_P256_ecdsa_verif_without_hash(
 {
   uint64_t m_q[4U] = { 0U };
   uint8_t mHash[32U] = { 0U };
-  memcpy(mHash, msg, (uint32_t)32U * sizeof (uint8_t));
-  KRML_HOST_IGNORE(msg_len);
+  memcpy(mHash, msg, 32U * sizeof (uint8_t));
+  KRML_MAYBE_UNUSED_VAR(msg_len);
   uint8_t *mHash32 = mHash;
   bn_from_bytes_be4(m_q, mHash32);
   qmod_short(m_q, m_q);
@@ -1864,7 +1840,7 @@ bool Hacl_P256_validate_private_key(uint8_t *private_key)
   uint64_t bn_sk[4U] = { 0U };
   bn_from_bytes_be4(bn_sk, private_key);
   uint64_t res = bn_is_lt_order_and_gt_zero_mask4(bn_sk);
-  return res == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return res == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /*******************************************************************************
@@ -1893,11 +1869,11 @@ Convert a public key from uncompressed to its raw form.
 bool Hacl_P256_uncompressed_to_raw(uint8_t *pk, uint8_t *pk_raw)
 {
   uint8_t pk0 = pk[0U];
-  if (pk0 != (uint8_t)0x04U)
+  if (pk0 != 0x04U)
   {
     return false;
   }
-  memcpy(pk_raw, pk + (uint32_t)1U, (uint32_t)64U * sizeof (uint8_t));
+  memcpy(pk_raw, pk + 1U, 64U * sizeof (uint8_t));
   return true;
 }
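/* Editorial sketch, not part of this patch: how a caller might use the
 * conversion above. Buffer sizes follow the SEC1 encoding for P-256 -- a
 * 65-byte uncompressed input (0x04 || X || Y) is stripped to the 64-byte
 * raw form (X || Y). The include path "Hacl_P256.h" is an assumption about
 * the dist layout. */
#include <stdbool.h>
#include <stdint.h>
#include "Hacl_P256.h"

bool example_uncompressed_to_raw(uint8_t uncompressed[65], uint8_t raw[64])
{
  /* Returns false when the leading byte is not the 0x04 marker. */
  return Hacl_P256_uncompressed_to_raw(uncompressed, raw);
}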
 
@@ -1915,12 +1891,12 @@ bool Hacl_P256_compressed_to_raw(uint8_t *pk, uint8_t *pk_raw)
 {
   uint64_t xa[4U] = { 0U };
   uint64_t ya[4U] = { 0U };
-  uint8_t *pk_xb = pk + (uint32_t)1U;
+  uint8_t *pk_xb = pk + 1U;
   bool b = aff_point_decompress_vartime(xa, ya, pk);
   if (b)
   {
-    memcpy(pk_raw, pk_xb, (uint32_t)32U * sizeof (uint8_t));
-    bn_to_bytes_be4(pk_raw + (uint32_t)32U, ya);
+    memcpy(pk_raw, pk_xb, 32U * sizeof (uint8_t));
+    bn_to_bytes_be4(pk_raw + 32U, ya);
   }
   return b;
 }
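/* Editorial sketch, not part of this patch: decompressing a 33-byte SEC1
 * compressed key (0x02/0x03 || X) into the 64-byte raw form. The boolean
 * result mirrors aff_point_decompress_vartime above: false when the
 * encoding is invalid or X does not correspond to a point on the curve. */
#include <stdbool.h>
#include <stdint.h>
#include "Hacl_P256.h"

bool example_compressed_to_raw(uint8_t compressed[33], uint8_t raw[64])
{
  return Hacl_P256_compressed_to_raw(compressed, raw);
}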
@@ -1935,8 +1911,8 @@ Convert a public key from raw to its uncompressed form.
 */
 void Hacl_P256_raw_to_uncompressed(uint8_t *pk_raw, uint8_t *pk)
 {
-  pk[0U] = (uint8_t)0x04U;
-  memcpy(pk + (uint32_t)1U, pk_raw, (uint32_t)64U * sizeof (uint8_t));
+  pk[0U] = 0x04U;
+  memcpy(pk + 1U, pk_raw, 64U * sizeof (uint8_t));
 }
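/* Editorial sketch, not part of this patch: serializing back to the
 * uncompressed form. This direction cannot fail; it only prepends the
 * 0x04 marker to the 64-byte raw key. */
#include <stdint.h>
#include "Hacl_P256.h"

void example_raw_to_uncompressed(uint8_t raw[64], uint8_t uncompressed[65])
{
  Hacl_P256_raw_to_uncompressed(raw, uncompressed);
}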
 
 /**
@@ -1950,12 +1926,12 @@ Convert a public key from raw to its compressed form.
 void Hacl_P256_raw_to_compressed(uint8_t *pk_raw, uint8_t *pk)
 {
   uint8_t *pk_x = pk_raw;
-  uint8_t *pk_y = pk_raw + (uint32_t)32U;
+  uint8_t *pk_y = pk_raw + 32U;
   uint64_t bn_f[4U] = { 0U };
   bn_from_bytes_be4(bn_f, pk_y);
-  uint64_t is_odd_f = bn_f[0U] & (uint64_t)1U;
-  pk[0U] = (uint8_t)is_odd_f + (uint8_t)0x02U;
-  memcpy(pk + (uint32_t)1U, pk_x, (uint32_t)32U * sizeof (uint8_t));
+  uint64_t is_odd_f = bn_f[0U] & 1ULL;
+  pk[0U] = (uint32_t)(uint8_t)is_odd_f + 0x02U;
+  memcpy(pk + 1U, pk_x, 32U * sizeof (uint8_t));
 }
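/* Editorial sketch, not part of this patch: a compressed round trip.
 * raw_to_compressed picks the 0x02/0x03 prefix from the parity of Y, as in
 * the code above; for a valid point, compressed_to_raw should then recover
 * the same 64-byte raw key. Buffer sizes (64/33 bytes) are the SEC1 P-256
 * sizes; "Hacl_P256.h" is an assumed include path. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "Hacl_P256.h"

bool example_compressed_round_trip(uint8_t raw[64])
{
  uint8_t compressed[33U] = { 0U };
  uint8_t raw2[64U] = { 0U };
  Hacl_P256_raw_to_compressed(raw, compressed);
  if (!Hacl_P256_compressed_to_raw(compressed, raw2))
  {
    return false;
  }
  return memcmp(raw, raw2, 64U) == 0;
}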
 
 
diff --git a/src/msvc/Hacl_Poly1305_32.c b/src/msvc/Hacl_Poly1305_32.c
deleted file mode 100644
index 5192559b..00000000
--- a/src/msvc/Hacl_Poly1305_32.c
+++ /dev/null
@@ -1,572 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Poly1305_32.h"
-
-void Hacl_Poly1305_32_poly1305_init(uint64_t *ctx, uint8_t *key)
-{
-  uint64_t *acc = ctx;
-  uint64_t *pre = ctx + (uint32_t)5U;
-  uint8_t *kr = key;
-  acc[0U] = (uint64_t)0U;
-  acc[1U] = (uint64_t)0U;
-  acc[2U] = (uint64_t)0U;
-  acc[3U] = (uint64_t)0U;
-  acc[4U] = (uint64_t)0U;
-  uint64_t u0 = load64_le(kr);
-  uint64_t lo = u0;
-  uint64_t u = load64_le(kr + (uint32_t)8U);
-  uint64_t hi = u;
-  uint64_t mask0 = (uint64_t)0x0ffffffc0fffffffU;
-  uint64_t mask1 = (uint64_t)0x0ffffffc0ffffffcU;
-  uint64_t lo1 = lo & mask0;
-  uint64_t hi1 = hi & mask1;
-  uint64_t *r = pre;
-  uint64_t *r5 = pre + (uint32_t)5U;
-  uint64_t *rn = pre + (uint32_t)10U;
-  uint64_t *rn_5 = pre + (uint32_t)15U;
-  uint64_t r_vec0 = lo1;
-  uint64_t r_vec1 = hi1;
-  uint64_t f00 = r_vec0 & (uint64_t)0x3ffffffU;
-  uint64_t f10 = r_vec0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-  uint64_t f20 = r_vec0 >> (uint32_t)52U | (r_vec1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-  uint64_t f30 = r_vec1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-  uint64_t f40 = r_vec1 >> (uint32_t)40U;
-  uint64_t f0 = f00;
-  uint64_t f1 = f10;
-  uint64_t f2 = f20;
-  uint64_t f3 = f30;
-  uint64_t f4 = f40;
-  r[0U] = f0;
-  r[1U] = f1;
-  r[2U] = f2;
-  r[3U] = f3;
-  r[4U] = f4;
-  uint64_t f200 = r[0U];
-  uint64_t f21 = r[1U];
-  uint64_t f22 = r[2U];
-  uint64_t f23 = r[3U];
-  uint64_t f24 = r[4U];
-  r5[0U] = f200 * (uint64_t)5U;
-  r5[1U] = f21 * (uint64_t)5U;
-  r5[2U] = f22 * (uint64_t)5U;
-  r5[3U] = f23 * (uint64_t)5U;
-  r5[4U] = f24 * (uint64_t)5U;
-  rn[0U] = r[0U];
-  rn[1U] = r[1U];
-  rn[2U] = r[2U];
-  rn[3U] = r[3U];
-  rn[4U] = r[4U];
-  rn_5[0U] = r5[0U];
-  rn_5[1U] = r5[1U];
-  rn_5[2U] = r5[2U];
-  rn_5[3U] = r5[3U];
-  rn_5[4U] = r5[4U];
-}
-
-void Hacl_Poly1305_32_poly1305_update1(uint64_t *ctx, uint8_t *text)
-{
-  uint64_t *pre = ctx + (uint32_t)5U;
-  uint64_t *acc = ctx;
-  uint64_t e[5U] = { 0U };
-  uint64_t u0 = load64_le(text);
-  uint64_t lo = u0;
-  uint64_t u = load64_le(text + (uint32_t)8U);
-  uint64_t hi = u;
-  uint64_t f0 = lo;
-  uint64_t f1 = hi;
-  uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-  uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-  uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-  uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-  uint64_t f40 = f1 >> (uint32_t)40U;
-  uint64_t f01 = f010;
-  uint64_t f111 = f110;
-  uint64_t f2 = f20;
-  uint64_t f3 = f30;
-  uint64_t f41 = f40;
-  e[0U] = f01;
-  e[1U] = f111;
-  e[2U] = f2;
-  e[3U] = f3;
-  e[4U] = f41;
-  uint64_t b = (uint64_t)0x1000000U;
-  uint64_t mask = b;
-  uint64_t f4 = e[4U];
-  e[4U] = f4 | mask;
-  uint64_t *r = pre;
-  uint64_t *r5 = pre + (uint32_t)5U;
-  uint64_t r0 = r[0U];
-  uint64_t r1 = r[1U];
-  uint64_t r2 = r[2U];
-  uint64_t r3 = r[3U];
-  uint64_t r4 = r[4U];
-  uint64_t r51 = r5[1U];
-  uint64_t r52 = r5[2U];
-  uint64_t r53 = r5[3U];
-  uint64_t r54 = r5[4U];
-  uint64_t f10 = e[0U];
-  uint64_t f11 = e[1U];
-  uint64_t f12 = e[2U];
-  uint64_t f13 = e[3U];
-  uint64_t f14 = e[4U];
-  uint64_t a0 = acc[0U];
-  uint64_t a1 = acc[1U];
-  uint64_t a2 = acc[2U];
-  uint64_t a3 = acc[3U];
-  uint64_t a4 = acc[4U];
-  uint64_t a01 = a0 + f10;
-  uint64_t a11 = a1 + f11;
-  uint64_t a21 = a2 + f12;
-  uint64_t a31 = a3 + f13;
-  uint64_t a41 = a4 + f14;
-  uint64_t a02 = r0 * a01;
-  uint64_t a12 = r1 * a01;
-  uint64_t a22 = r2 * a01;
-  uint64_t a32 = r3 * a01;
-  uint64_t a42 = r4 * a01;
-  uint64_t a03 = a02 + r54 * a11;
-  uint64_t a13 = a12 + r0 * a11;
-  uint64_t a23 = a22 + r1 * a11;
-  uint64_t a33 = a32 + r2 * a11;
-  uint64_t a43 = a42 + r3 * a11;
-  uint64_t a04 = a03 + r53 * a21;
-  uint64_t a14 = a13 + r54 * a21;
-  uint64_t a24 = a23 + r0 * a21;
-  uint64_t a34 = a33 + r1 * a21;
-  uint64_t a44 = a43 + r2 * a21;
-  uint64_t a05 = a04 + r52 * a31;
-  uint64_t a15 = a14 + r53 * a31;
-  uint64_t a25 = a24 + r54 * a31;
-  uint64_t a35 = a34 + r0 * a31;
-  uint64_t a45 = a44 + r1 * a31;
-  uint64_t a06 = a05 + r51 * a41;
-  uint64_t a16 = a15 + r52 * a41;
-  uint64_t a26 = a25 + r53 * a41;
-  uint64_t a36 = a35 + r54 * a41;
-  uint64_t a46 = a45 + r0 * a41;
-  uint64_t t0 = a06;
-  uint64_t t1 = a16;
-  uint64_t t2 = a26;
-  uint64_t t3 = a36;
-  uint64_t t4 = a46;
-  uint64_t mask26 = (uint64_t)0x3ffffffU;
-  uint64_t z0 = t0 >> (uint32_t)26U;
-  uint64_t z1 = t3 >> (uint32_t)26U;
-  uint64_t x0 = t0 & mask26;
-  uint64_t x3 = t3 & mask26;
-  uint64_t x1 = t1 + z0;
-  uint64_t x4 = t4 + z1;
-  uint64_t z01 = x1 >> (uint32_t)26U;
-  uint64_t z11 = x4 >> (uint32_t)26U;
-  uint64_t t = z11 << (uint32_t)2U;
-  uint64_t z12 = z11 + t;
-  uint64_t x11 = x1 & mask26;
-  uint64_t x41 = x4 & mask26;
-  uint64_t x2 = t2 + z01;
-  uint64_t x01 = x0 + z12;
-  uint64_t z02 = x2 >> (uint32_t)26U;
-  uint64_t z13 = x01 >> (uint32_t)26U;
-  uint64_t x21 = x2 & mask26;
-  uint64_t x02 = x01 & mask26;
-  uint64_t x31 = x3 + z02;
-  uint64_t x12 = x11 + z13;
-  uint64_t z03 = x31 >> (uint32_t)26U;
-  uint64_t x32 = x31 & mask26;
-  uint64_t x42 = x41 + z03;
-  uint64_t o0 = x02;
-  uint64_t o1 = x12;
-  uint64_t o2 = x21;
-  uint64_t o3 = x32;
-  uint64_t o4 = x42;
-  acc[0U] = o0;
-  acc[1U] = o1;
-  acc[2U] = o2;
-  acc[3U] = o3;
-  acc[4U] = o4;
-}
-
-void Hacl_Poly1305_32_poly1305_update(uint64_t *ctx, uint32_t len, uint8_t *text)
-{
-  uint64_t *pre = ctx + (uint32_t)5U;
-  uint64_t *acc = ctx;
-  uint32_t nb = len / (uint32_t)16U;
-  uint32_t rem = len % (uint32_t)16U;
-  for (uint32_t i = (uint32_t)0U; i < nb; i++)
-  {
-    uint8_t *block = text + i * (uint32_t)16U;
-    uint64_t e[5U] = { 0U };
-    uint64_t u0 = load64_le(block);
-    uint64_t lo = u0;
-    uint64_t u = load64_le(block + (uint32_t)8U);
-    uint64_t hi = u;
-    uint64_t f0 = lo;
-    uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
-    uint64_t f01 = f010;
-    uint64_t f111 = f110;
-    uint64_t f2 = f20;
-    uint64_t f3 = f30;
-    uint64_t f41 = f40;
-    e[0U] = f01;
-    e[1U] = f111;
-    e[2U] = f2;
-    e[3U] = f3;
-    e[4U] = f41;
-    uint64_t b = (uint64_t)0x1000000U;
-    uint64_t mask = b;
-    uint64_t f4 = e[4U];
-    e[4U] = f4 | mask;
-    uint64_t *r = pre;
-    uint64_t *r5 = pre + (uint32_t)5U;
-    uint64_t r0 = r[0U];
-    uint64_t r1 = r[1U];
-    uint64_t r2 = r[2U];
-    uint64_t r3 = r[3U];
-    uint64_t r4 = r[4U];
-    uint64_t r51 = r5[1U];
-    uint64_t r52 = r5[2U];
-    uint64_t r53 = r5[3U];
-    uint64_t r54 = r5[4U];
-    uint64_t f10 = e[0U];
-    uint64_t f11 = e[1U];
-    uint64_t f12 = e[2U];
-    uint64_t f13 = e[3U];
-    uint64_t f14 = e[4U];
-    uint64_t a0 = acc[0U];
-    uint64_t a1 = acc[1U];
-    uint64_t a2 = acc[2U];
-    uint64_t a3 = acc[3U];
-    uint64_t a4 = acc[4U];
-    uint64_t a01 = a0 + f10;
-    uint64_t a11 = a1 + f11;
-    uint64_t a21 = a2 + f12;
-    uint64_t a31 = a3 + f13;
-    uint64_t a41 = a4 + f14;
-    uint64_t a02 = r0 * a01;
-    uint64_t a12 = r1 * a01;
-    uint64_t a22 = r2 * a01;
-    uint64_t a32 = r3 * a01;
-    uint64_t a42 = r4 * a01;
-    uint64_t a03 = a02 + r54 * a11;
-    uint64_t a13 = a12 + r0 * a11;
-    uint64_t a23 = a22 + r1 * a11;
-    uint64_t a33 = a32 + r2 * a11;
-    uint64_t a43 = a42 + r3 * a11;
-    uint64_t a04 = a03 + r53 * a21;
-    uint64_t a14 = a13 + r54 * a21;
-    uint64_t a24 = a23 + r0 * a21;
-    uint64_t a34 = a33 + r1 * a21;
-    uint64_t a44 = a43 + r2 * a21;
-    uint64_t a05 = a04 + r52 * a31;
-    uint64_t a15 = a14 + r53 * a31;
-    uint64_t a25 = a24 + r54 * a31;
-    uint64_t a35 = a34 + r0 * a31;
-    uint64_t a45 = a44 + r1 * a31;
-    uint64_t a06 = a05 + r51 * a41;
-    uint64_t a16 = a15 + r52 * a41;
-    uint64_t a26 = a25 + r53 * a41;
-    uint64_t a36 = a35 + r54 * a41;
-    uint64_t a46 = a45 + r0 * a41;
-    uint64_t t0 = a06;
-    uint64_t t1 = a16;
-    uint64_t t2 = a26;
-    uint64_t t3 = a36;
-    uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
-    uint64_t x0 = t0 & mask26;
-    uint64_t x3 = t3 & mask26;
-    uint64_t x1 = t1 + z0;
-    uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
-    uint64_t z12 = z11 + t;
-    uint64_t x11 = x1 & mask26;
-    uint64_t x41 = x4 & mask26;
-    uint64_t x2 = t2 + z01;
-    uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
-    uint64_t x21 = x2 & mask26;
-    uint64_t x02 = x01 & mask26;
-    uint64_t x31 = x3 + z02;
-    uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
-    uint64_t x32 = x31 & mask26;
-    uint64_t x42 = x41 + z03;
-    uint64_t o0 = x02;
-    uint64_t o1 = x12;
-    uint64_t o2 = x21;
-    uint64_t o3 = x32;
-    uint64_t o4 = x42;
-    acc[0U] = o0;
-    acc[1U] = o1;
-    acc[2U] = o2;
-    acc[3U] = o3;
-    acc[4U] = o4;
-  }
-  if (rem > (uint32_t)0U)
-  {
-    uint8_t *last = text + nb * (uint32_t)16U;
-    uint64_t e[5U] = { 0U };
-    uint8_t tmp[16U] = { 0U };
-    memcpy(tmp, last, rem * sizeof (uint8_t));
-    uint64_t u0 = load64_le(tmp);
-    uint64_t lo = u0;
-    uint64_t u = load64_le(tmp + (uint32_t)8U);
-    uint64_t hi = u;
-    uint64_t f0 = lo;
-    uint64_t f1 = hi;
-    uint64_t f010 = f0 & (uint64_t)0x3ffffffU;
-    uint64_t f110 = f0 >> (uint32_t)26U & (uint64_t)0x3ffffffU;
-    uint64_t f20 = f0 >> (uint32_t)52U | (f1 & (uint64_t)0x3fffU) << (uint32_t)12U;
-    uint64_t f30 = f1 >> (uint32_t)14U & (uint64_t)0x3ffffffU;
-    uint64_t f40 = f1 >> (uint32_t)40U;
-    uint64_t f01 = f010;
-    uint64_t f111 = f110;
-    uint64_t f2 = f20;
-    uint64_t f3 = f30;
-    uint64_t f4 = f40;
-    e[0U] = f01;
-    e[1U] = f111;
-    e[2U] = f2;
-    e[3U] = f3;
-    e[4U] = f4;
-    uint64_t b = (uint64_t)1U << rem * (uint32_t)8U % (uint32_t)26U;
-    uint64_t mask = b;
-    uint64_t fi = e[rem * (uint32_t)8U / (uint32_t)26U];
-    e[rem * (uint32_t)8U / (uint32_t)26U] = fi | mask;
-    uint64_t *r = pre;
-    uint64_t *r5 = pre + (uint32_t)5U;
-    uint64_t r0 = r[0U];
-    uint64_t r1 = r[1U];
-    uint64_t r2 = r[2U];
-    uint64_t r3 = r[3U];
-    uint64_t r4 = r[4U];
-    uint64_t r51 = r5[1U];
-    uint64_t r52 = r5[2U];
-    uint64_t r53 = r5[3U];
-    uint64_t r54 = r5[4U];
-    uint64_t f10 = e[0U];
-    uint64_t f11 = e[1U];
-    uint64_t f12 = e[2U];
-    uint64_t f13 = e[3U];
-    uint64_t f14 = e[4U];
-    uint64_t a0 = acc[0U];
-    uint64_t a1 = acc[1U];
-    uint64_t a2 = acc[2U];
-    uint64_t a3 = acc[3U];
-    uint64_t a4 = acc[4U];
-    uint64_t a01 = a0 + f10;
-    uint64_t a11 = a1 + f11;
-    uint64_t a21 = a2 + f12;
-    uint64_t a31 = a3 + f13;
-    uint64_t a41 = a4 + f14;
-    uint64_t a02 = r0 * a01;
-    uint64_t a12 = r1 * a01;
-    uint64_t a22 = r2 * a01;
-    uint64_t a32 = r3 * a01;
-    uint64_t a42 = r4 * a01;
-    uint64_t a03 = a02 + r54 * a11;
-    uint64_t a13 = a12 + r0 * a11;
-    uint64_t a23 = a22 + r1 * a11;
-    uint64_t a33 = a32 + r2 * a11;
-    uint64_t a43 = a42 + r3 * a11;
-    uint64_t a04 = a03 + r53 * a21;
-    uint64_t a14 = a13 + r54 * a21;
-    uint64_t a24 = a23 + r0 * a21;
-    uint64_t a34 = a33 + r1 * a21;
-    uint64_t a44 = a43 + r2 * a21;
-    uint64_t a05 = a04 + r52 * a31;
-    uint64_t a15 = a14 + r53 * a31;
-    uint64_t a25 = a24 + r54 * a31;
-    uint64_t a35 = a34 + r0 * a31;
-    uint64_t a45 = a44 + r1 * a31;
-    uint64_t a06 = a05 + r51 * a41;
-    uint64_t a16 = a15 + r52 * a41;
-    uint64_t a26 = a25 + r53 * a41;
-    uint64_t a36 = a35 + r54 * a41;
-    uint64_t a46 = a45 + r0 * a41;
-    uint64_t t0 = a06;
-    uint64_t t1 = a16;
-    uint64_t t2 = a26;
-    uint64_t t3 = a36;
-    uint64_t t4 = a46;
-    uint64_t mask26 = (uint64_t)0x3ffffffU;
-    uint64_t z0 = t0 >> (uint32_t)26U;
-    uint64_t z1 = t3 >> (uint32_t)26U;
-    uint64_t x0 = t0 & mask26;
-    uint64_t x3 = t3 & mask26;
-    uint64_t x1 = t1 + z0;
-    uint64_t x4 = t4 + z1;
-    uint64_t z01 = x1 >> (uint32_t)26U;
-    uint64_t z11 = x4 >> (uint32_t)26U;
-    uint64_t t = z11 << (uint32_t)2U;
-    uint64_t z12 = z11 + t;
-    uint64_t x11 = x1 & mask26;
-    uint64_t x41 = x4 & mask26;
-    uint64_t x2 = t2 + z01;
-    uint64_t x01 = x0 + z12;
-    uint64_t z02 = x2 >> (uint32_t)26U;
-    uint64_t z13 = x01 >> (uint32_t)26U;
-    uint64_t x21 = x2 & mask26;
-    uint64_t x02 = x01 & mask26;
-    uint64_t x31 = x3 + z02;
-    uint64_t x12 = x11 + z13;
-    uint64_t z03 = x31 >> (uint32_t)26U;
-    uint64_t x32 = x31 & mask26;
-    uint64_t x42 = x41 + z03;
-    uint64_t o0 = x02;
-    uint64_t o1 = x12;
-    uint64_t o2 = x21;
-    uint64_t o3 = x32;
-    uint64_t o4 = x42;
-    acc[0U] = o0;
-    acc[1U] = o1;
-    acc[2U] = o2;
-    acc[3U] = o3;
-    acc[4U] = o4;
-    return;
-  }
-}
-
-void Hacl_Poly1305_32_poly1305_finish(uint8_t *tag, uint8_t *key, uint64_t *ctx)
-{
-  uint64_t *acc = ctx;
-  uint8_t *ks = key + (uint32_t)16U;
-  uint64_t f0 = acc[0U];
-  uint64_t f13 = acc[1U];
-  uint64_t f23 = acc[2U];
-  uint64_t f33 = acc[3U];
-  uint64_t f40 = acc[4U];
-  uint64_t l0 = f0 + (uint64_t)0U;
-  uint64_t tmp00 = l0 & (uint64_t)0x3ffffffU;
-  uint64_t c00 = l0 >> (uint32_t)26U;
-  uint64_t l1 = f13 + c00;
-  uint64_t tmp10 = l1 & (uint64_t)0x3ffffffU;
-  uint64_t c10 = l1 >> (uint32_t)26U;
-  uint64_t l2 = f23 + c10;
-  uint64_t tmp20 = l2 & (uint64_t)0x3ffffffU;
-  uint64_t c20 = l2 >> (uint32_t)26U;
-  uint64_t l3 = f33 + c20;
-  uint64_t tmp30 = l3 & (uint64_t)0x3ffffffU;
-  uint64_t c30 = l3 >> (uint32_t)26U;
-  uint64_t l4 = f40 + c30;
-  uint64_t tmp40 = l4 & (uint64_t)0x3ffffffU;
-  uint64_t c40 = l4 >> (uint32_t)26U;
-  uint64_t f010 = tmp00 + c40 * (uint64_t)5U;
-  uint64_t f110 = tmp10;
-  uint64_t f210 = tmp20;
-  uint64_t f310 = tmp30;
-  uint64_t f410 = tmp40;
-  uint64_t l = f010 + (uint64_t)0U;
-  uint64_t tmp0 = l & (uint64_t)0x3ffffffU;
-  uint64_t c0 = l >> (uint32_t)26U;
-  uint64_t l5 = f110 + c0;
-  uint64_t tmp1 = l5 & (uint64_t)0x3ffffffU;
-  uint64_t c1 = l5 >> (uint32_t)26U;
-  uint64_t l6 = f210 + c1;
-  uint64_t tmp2 = l6 & (uint64_t)0x3ffffffU;
-  uint64_t c2 = l6 >> (uint32_t)26U;
-  uint64_t l7 = f310 + c2;
-  uint64_t tmp3 = l7 & (uint64_t)0x3ffffffU;
-  uint64_t c3 = l7 >> (uint32_t)26U;
-  uint64_t l8 = f410 + c3;
-  uint64_t tmp4 = l8 & (uint64_t)0x3ffffffU;
-  uint64_t c4 = l8 >> (uint32_t)26U;
-  uint64_t f02 = tmp0 + c4 * (uint64_t)5U;
-  uint64_t f12 = tmp1;
-  uint64_t f22 = tmp2;
-  uint64_t f32 = tmp3;
-  uint64_t f42 = tmp4;
-  uint64_t mh = (uint64_t)0x3ffffffU;
-  uint64_t ml = (uint64_t)0x3fffffbU;
-  uint64_t mask = FStar_UInt64_eq_mask(f42, mh);
-  uint64_t mask1 = mask & FStar_UInt64_eq_mask(f32, mh);
-  uint64_t mask2 = mask1 & FStar_UInt64_eq_mask(f22, mh);
-  uint64_t mask3 = mask2 & FStar_UInt64_eq_mask(f12, mh);
-  uint64_t mask4 = mask3 & ~~FStar_UInt64_gte_mask(f02, ml);
-  uint64_t ph = mask4 & mh;
-  uint64_t pl = mask4 & ml;
-  uint64_t o0 = f02 - pl;
-  uint64_t o1 = f12 - ph;
-  uint64_t o2 = f22 - ph;
-  uint64_t o3 = f32 - ph;
-  uint64_t o4 = f42 - ph;
-  uint64_t f011 = o0;
-  uint64_t f111 = o1;
-  uint64_t f211 = o2;
-  uint64_t f311 = o3;
-  uint64_t f411 = o4;
-  acc[0U] = f011;
-  acc[1U] = f111;
-  acc[2U] = f211;
-  acc[3U] = f311;
-  acc[4U] = f411;
-  uint64_t f00 = acc[0U];
-  uint64_t f1 = acc[1U];
-  uint64_t f2 = acc[2U];
-  uint64_t f3 = acc[3U];
-  uint64_t f4 = acc[4U];
-  uint64_t f01 = f00;
-  uint64_t f112 = f1;
-  uint64_t f212 = f2;
-  uint64_t f312 = f3;
-  uint64_t f41 = f4;
-  uint64_t lo = (f01 | f112 << (uint32_t)26U) | f212 << (uint32_t)52U;
-  uint64_t hi = (f212 >> (uint32_t)12U | f312 << (uint32_t)14U) | f41 << (uint32_t)40U;
-  uint64_t f10 = lo;
-  uint64_t f11 = hi;
-  uint64_t u0 = load64_le(ks);
-  uint64_t lo0 = u0;
-  uint64_t u = load64_le(ks + (uint32_t)8U);
-  uint64_t hi0 = u;
-  uint64_t f20 = lo0;
-  uint64_t f21 = hi0;
-  uint64_t r0 = f10 + f20;
-  uint64_t r1 = f11 + f21;
-  uint64_t c = (r0 ^ ((r0 ^ f20) | ((r0 - f20) ^ f20))) >> (uint32_t)63U;
-  uint64_t r11 = r1 + c;
-  uint64_t f30 = r0;
-  uint64_t f31 = r11;
-  store64_le(tag, f30);
-  store64_le(tag + (uint32_t)8U, f31);
-}
-
-void Hacl_Poly1305_32_poly1305_mac(uint8_t *tag, uint32_t len, uint8_t *text, uint8_t *key)
-{
-  uint64_t ctx[25U] = { 0U };
-  Hacl_Poly1305_32_poly1305_init(ctx, key);
-  Hacl_Poly1305_32_poly1305_update(ctx, len, text);
-  Hacl_Poly1305_32_poly1305_finish(tag, key, ctx);
-}
-
diff --git a/src/msvc/Hacl_RSAPSS.c b/src/msvc/Hacl_RSAPSS.c
index 084f10b3..cd19195d 100644
--- a/src/msvc/Hacl_RSAPSS.c
+++ b/src/msvc/Hacl_RSAPSS.c
@@ -35,51 +35,51 @@ static inline uint32_t hash_len(Spec_Hash_Definitions_hash_alg a)
   {
     case Spec_Hash_Definitions_MD5:
       {
-        return (uint32_t)16U;
+        return 16U;
       }
     case Spec_Hash_Definitions_SHA1:
       {
-        return (uint32_t)20U;
+        return 20U;
       }
     case Spec_Hash_Definitions_SHA2_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA2_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_Blake2S:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_Blake2B:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     case Spec_Hash_Definitions_SHA3_224:
       {
-        return (uint32_t)28U;
+        return 28U;
       }
     case Spec_Hash_Definitions_SHA3_256:
       {
-        return (uint32_t)32U;
+        return 32U;
       }
     case Spec_Hash_Definitions_SHA3_384:
       {
-        return (uint32_t)48U;
+        return 48U;
       }
     case Spec_Hash_Definitions_SHA3_512:
       {
-        return (uint32_t)64U;
+        return 64U;
       }
     default:
       {
@@ -96,17 +96,17 @@ hash(Spec_Hash_Definitions_hash_alg a, uint8_t *mHash, uint32_t msgLen, uint8_t
   {
     case Spec_Hash_Definitions_SHA2_256:
       {
-        Hacl_Streaming_SHA2_hash_256(msg, msgLen, mHash);
+        Hacl_Hash_SHA2_hash_256(mHash, msg, msgLen);
         break;
       }
     case Spec_Hash_Definitions_SHA2_384:
       {
-        Hacl_Streaming_SHA2_hash_384(msg, msgLen, mHash);
+        Hacl_Hash_SHA2_hash_384(mHash, msg, msgLen);
         break;
       }
     case Spec_Hash_Definitions_SHA2_512:
       {
-        Hacl_Streaming_SHA2_hash_512(msg, msgLen, mHash);
+        Hacl_Hash_SHA2_hash_512(mHash, msg, msgLen);
         break;
       }
     default:
@@ -126,48 +126,48 @@ mgf_hash(
   uint8_t *res
 )
 {
-  KRML_CHECK_SIZE(sizeof (uint8_t), len + (uint32_t)4U);
-  uint8_t *mgfseed_counter = (uint8_t *)alloca((len + (uint32_t)4U) * sizeof (uint8_t));
-  memset(mgfseed_counter, 0U, (len + (uint32_t)4U) * sizeof (uint8_t));
+  KRML_CHECK_SIZE(sizeof (uint8_t), len + 4U);
+  uint8_t *mgfseed_counter = (uint8_t *)alloca((len + 4U) * sizeof (uint8_t));
+  memset(mgfseed_counter, 0U, (len + 4U) * sizeof (uint8_t));
   memcpy(mgfseed_counter, mgfseed, len * sizeof (uint8_t));
   uint32_t hLen = hash_len(a);
-  uint32_t n = (maskLen - (uint32_t)1U) / hLen + (uint32_t)1U;
+  uint32_t n = (maskLen - 1U) / hLen + 1U;
   uint32_t accLen = n * hLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), accLen);
   uint8_t *acc = (uint8_t *)alloca(accLen * sizeof (uint8_t));
   memset(acc, 0U, accLen * sizeof (uint8_t));
-  for (uint32_t i = (uint32_t)0U; i < n; i++)
+  for (uint32_t i = 0U; i < n; i++)
   {
     uint8_t *acc_i = acc + i * hLen;
     uint8_t *c = mgfseed_counter + len;
-    c[0U] = (uint8_t)(i >> (uint32_t)24U);
-    c[1U] = (uint8_t)(i >> (uint32_t)16U);
-    c[2U] = (uint8_t)(i >> (uint32_t)8U);
+    c[0U] = (uint8_t)(i >> 24U);
+    c[1U] = (uint8_t)(i >> 16U);
+    c[2U] = (uint8_t)(i >> 8U);
     c[3U] = (uint8_t)i;
-    hash(a, acc_i, len + (uint32_t)4U, mgfseed_counter);
+    hash(a, acc_i, len + 4U, mgfseed_counter);
   }
   memcpy(res, acc, maskLen * sizeof (uint8_t));
 }
 
 static inline uint64_t check_num_bits_u64(uint32_t bs, uint64_t *b)
 {
-  uint32_t bLen = (bs - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  if (bs == (uint32_t)64U * bLen)
+  uint32_t bLen = (bs - 1U) / 64U + 1U;
+  if (bs == 64U * bLen)
   {
-    return (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    return 0xFFFFFFFFFFFFFFFFULL;
   }
   KRML_CHECK_SIZE(sizeof (uint64_t), bLen);
   uint64_t *b2 = (uint64_t *)alloca(bLen * sizeof (uint64_t));
   memset(b2, 0U, bLen * sizeof (uint64_t));
-  uint32_t i0 = bs / (uint32_t)64U;
-  uint32_t j = bs % (uint32_t)64U;
-  b2[i0] = b2[i0] | (uint64_t)1U << j;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < bLen; i++)
+  uint32_t i0 = bs / 64U;
+  uint32_t j = bs % 64U;
+  b2[i0] = b2[i0] | 1ULL << j;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < bLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(b[i], b2[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(b[i], b2[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t res = acc;
   return res;
@@ -175,21 +175,21 @@ static inline uint64_t check_num_bits_u64(uint32_t bs, uint64_t *b)
 
 static inline uint64_t check_modulus_u64(uint32_t modBits, uint64_t *n)
 {
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint64_t bits0 = n[0U] & (uint64_t)1U;
-  uint64_t m0 = (uint64_t)0U - bits0;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint64_t bits0 = n[0U] & 1ULL;
+  uint64_t m0 = 0ULL - bits0;
   KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
   uint64_t *b2 = (uint64_t *)alloca(nLen * sizeof (uint64_t));
   memset(b2, 0U, nLen * sizeof (uint64_t));
-  uint32_t i0 = (modBits - (uint32_t)1U) / (uint32_t)64U;
-  uint32_t j = (modBits - (uint32_t)1U) % (uint32_t)64U;
-  b2[i0] = b2[i0] | (uint64_t)1U << j;
-  uint64_t acc = (uint64_t)0U;
-  for (uint32_t i = (uint32_t)0U; i < nLen; i++)
+  uint32_t i0 = (modBits - 1U) / 64U;
+  uint32_t j = (modBits - 1U) % 64U;
+  b2[i0] = b2[i0] | 1ULL << j;
+  uint64_t acc = 0ULL;
+  for (uint32_t i = 0U; i < nLen; i++)
   {
     uint64_t beq = FStar_UInt64_eq_mask(b2[i], n[i]);
     uint64_t blt = ~FStar_UInt64_gte_mask(b2[i], n[i]);
-    acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+    acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
   }
   uint64_t res = acc;
   uint64_t m1 = res;
@@ -199,12 +199,12 @@ static inline uint64_t check_modulus_u64(uint32_t modBits, uint64_t *n)
 
 static inline uint64_t check_exponent_u64(uint32_t eBits, uint64_t *e)
 {
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
   KRML_CHECK_SIZE(sizeof (uint64_t), eLen);
   uint64_t *bn_zero = (uint64_t *)alloca(eLen * sizeof (uint64_t));
   memset(bn_zero, 0U, eLen * sizeof (uint64_t));
-  uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-  for (uint32_t i = (uint32_t)0U; i < eLen; i++)
+  uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+  for (uint32_t i = 0U; i < eLen; i++)
   {
     uint64_t uu____0 = FStar_UInt64_eq_mask(e[i], bn_zero[i]);
     mask = uu____0 & mask;
@@ -231,39 +231,39 @@ pss_encode(
   KRML_CHECK_SIZE(sizeof (uint8_t), hLen);
   uint8_t *m1Hash = (uint8_t *)alloca(hLen * sizeof (uint8_t));
   memset(m1Hash, 0U, hLen * sizeof (uint8_t));
-  uint32_t m1Len = (uint32_t)8U + hLen + saltLen;
+  uint32_t m1Len = 8U + hLen + saltLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), m1Len);
   uint8_t *m1 = (uint8_t *)alloca(m1Len * sizeof (uint8_t));
   memset(m1, 0U, m1Len * sizeof (uint8_t));
-  hash(a, m1 + (uint32_t)8U, msgLen, msg);
-  memcpy(m1 + (uint32_t)8U + hLen, salt, saltLen * sizeof (uint8_t));
+  hash(a, m1 + 8U, msgLen, msg);
+  memcpy(m1 + 8U + hLen, salt, saltLen * sizeof (uint8_t));
   hash(a, m1Hash, m1Len, m1);
-  uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t dbLen = emLen - hLen - (uint32_t)1U;
+  uint32_t emLen = (emBits - 1U) / 8U + 1U;
+  uint32_t dbLen = emLen - hLen - 1U;
   KRML_CHECK_SIZE(sizeof (uint8_t), dbLen);
   uint8_t *db = (uint8_t *)alloca(dbLen * sizeof (uint8_t));
   memset(db, 0U, dbLen * sizeof (uint8_t));
-  uint32_t last_before_salt = dbLen - saltLen - (uint32_t)1U;
-  db[last_before_salt] = (uint8_t)1U;
-  memcpy(db + last_before_salt + (uint32_t)1U, salt, saltLen * sizeof (uint8_t));
+  uint32_t last_before_salt = dbLen - saltLen - 1U;
+  db[last_before_salt] = 1U;
+  memcpy(db + last_before_salt + 1U, salt, saltLen * sizeof (uint8_t));
   KRML_CHECK_SIZE(sizeof (uint8_t), dbLen);
   uint8_t *dbMask = (uint8_t *)alloca(dbLen * sizeof (uint8_t));
   memset(dbMask, 0U, dbLen * sizeof (uint8_t));
   mgf_hash(a, hLen, m1Hash, dbLen, dbMask);
-  for (uint32_t i = (uint32_t)0U; i < dbLen; i++)
+  for (uint32_t i = 0U; i < dbLen; i++)
   {
     uint8_t *os = db;
-    uint8_t x = db[i] ^ dbMask[i];
+    uint8_t x = (uint32_t)db[i] ^ (uint32_t)dbMask[i];
     os[i] = x;
   }
-  uint32_t msBits = emBits % (uint32_t)8U;
-  if (msBits > (uint32_t)0U)
+  uint32_t msBits = emBits % 8U;
+  if (msBits > 0U)
   {
-    db[0U] = db[0U] & (uint8_t)0xffU >> ((uint32_t)8U - msBits);
+    db[0U] = (uint32_t)db[0U] & 0xffU >> (8U - msBits);
   }
   memcpy(em, db, dbLen * sizeof (uint8_t));
   memcpy(em + dbLen, m1Hash, hLen * sizeof (uint8_t));
-  em[emLen - (uint32_t)1U] = (uint8_t)0xbcU;
+  em[emLen - 1U] = 0xbcU;
 }
 
 static inline bool
@@ -276,105 +276,100 @@ pss_verify(
   uint8_t *em
 )
 {
-  uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t msBits = emBits % (uint32_t)8U;
+  uint32_t emLen = (emBits - 1U) / 8U + 1U;
+  uint32_t msBits = emBits % 8U;
   uint8_t em_0;
-  if (msBits > (uint32_t)0U)
+  if (msBits > 0U)
   {
-    em_0 = em[0U] & (uint8_t)0xffU << msBits;
+    em_0 = (uint32_t)em[0U] & 0xffU << msBits;
   }
   else
   {
-    em_0 = (uint8_t)0U;
+    em_0 = 0U;
   }
-  uint8_t em_last = em[emLen - (uint32_t)1U];
-  if (emLen < saltLen + hash_len(a) + (uint32_t)2U)
+  uint8_t em_last = em[emLen - 1U];
+  if (emLen < saltLen + hash_len(a) + 2U)
   {
     return false;
   }
-  if (!(em_last == (uint8_t)0xbcU && em_0 == (uint8_t)0U))
+  if (!(em_last == 0xbcU && em_0 == 0U))
   {
     return false;
   }
-  uint32_t emLen1 = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  uint32_t emLen1 = (emBits - 1U) / 8U + 1U;
   uint32_t hLen = hash_len(a);
   KRML_CHECK_SIZE(sizeof (uint8_t), hLen);
   uint8_t *m1Hash0 = (uint8_t *)alloca(hLen * sizeof (uint8_t));
   memset(m1Hash0, 0U, hLen * sizeof (uint8_t));
-  uint32_t dbLen = emLen1 - hLen - (uint32_t)1U;
+  uint32_t dbLen = emLen1 - hLen - 1U;
   uint8_t *maskedDB = em;
   uint8_t *m1Hash = em + dbLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), dbLen);
   uint8_t *dbMask = (uint8_t *)alloca(dbLen * sizeof (uint8_t));
   memset(dbMask, 0U, dbLen * sizeof (uint8_t));
   mgf_hash(a, hLen, m1Hash, dbLen, dbMask);
-  for (uint32_t i = (uint32_t)0U; i < dbLen; i++)
+  for (uint32_t i = 0U; i < dbLen; i++)
   {
     uint8_t *os = dbMask;
-    uint8_t x = dbMask[i] ^ maskedDB[i];
+    uint8_t x = (uint32_t)dbMask[i] ^ (uint32_t)maskedDB[i];
     os[i] = x;
   }
-  uint32_t msBits1 = emBits % (uint32_t)8U;
-  if (msBits1 > (uint32_t)0U)
+  uint32_t msBits1 = emBits % 8U;
+  if (msBits1 > 0U)
   {
-    dbMask[0U] = dbMask[0U] & (uint8_t)0xffU >> ((uint32_t)8U - msBits1);
+    dbMask[0U] = (uint32_t)dbMask[0U] & 0xffU >> (8U - msBits1);
   }
-  uint32_t padLen = emLen1 - saltLen - hLen - (uint32_t)1U;
+  uint32_t padLen = emLen1 - saltLen - hLen - 1U;
   KRML_CHECK_SIZE(sizeof (uint8_t), padLen);
   uint8_t *pad2 = (uint8_t *)alloca(padLen * sizeof (uint8_t));
   memset(pad2, 0U, padLen * sizeof (uint8_t));
-  pad2[padLen - (uint32_t)1U] = (uint8_t)0x01U;
+  pad2[padLen - 1U] = 0x01U;
   uint8_t *pad = dbMask;
   uint8_t *salt = dbMask + padLen;
-  uint8_t res = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < padLen; i++)
+  uint8_t res = 255U;
+  for (uint32_t i = 0U; i < padLen; i++)
   {
     uint8_t uu____0 = FStar_UInt8_eq_mask(pad[i], pad2[i]);
-    res = uu____0 & res;
+    res = (uint32_t)uu____0 & (uint32_t)res;
   }
   uint8_t z = res;
-  if (!(z == (uint8_t)255U))
+  if (!(z == 255U))
   {
     return false;
   }
-  uint32_t m1Len = (uint32_t)8U + hLen + saltLen;
+  uint32_t m1Len = 8U + hLen + saltLen;
   KRML_CHECK_SIZE(sizeof (uint8_t), m1Len);
   uint8_t *m1 = (uint8_t *)alloca(m1Len * sizeof (uint8_t));
   memset(m1, 0U, m1Len * sizeof (uint8_t));
-  hash(a, m1 + (uint32_t)8U, msgLen, msg);
-  memcpy(m1 + (uint32_t)8U + hLen, salt, saltLen * sizeof (uint8_t));
+  hash(a, m1 + 8U, msgLen, msg);
+  memcpy(m1 + 8U + hLen, salt, saltLen * sizeof (uint8_t));
   hash(a, m1Hash0, m1Len, m1);
-  uint8_t res0 = (uint8_t)255U;
-  for (uint32_t i = (uint32_t)0U; i < hLen; i++)
+  uint8_t res0 = 255U;
+  for (uint32_t i = 0U; i < hLen; i++)
   {
     uint8_t uu____1 = FStar_UInt8_eq_mask(m1Hash0[i], m1Hash[i]);
-    res0 = uu____1 & res0;
+    res0 = (uint32_t)uu____1 & (uint32_t)res0;
   }
   uint8_t z0 = res0;
-  return z0 == (uint8_t)255U;
+  return z0 == 255U;
 }
 
 static inline bool
 load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb, uint64_t *pkey)
 {
-  uint32_t nbLen = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t ebLen = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nbLen = (modBits - 1U) / 8U + 1U;
+  uint32_t ebLen = (eBits - 1U) / 8U + 1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
   uint64_t *n = pkey;
   uint64_t *r2 = pkey + nLen;
   uint64_t *e = pkey + nLen + nLen;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen, nb, n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U)
-    / (uint32_t)64U
-    + (uint32_t)1U,
-    modBits - (uint32_t)1U,
-    n,
-    r2);
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen, eb, e);
   uint64_t m0 = check_modulus_u64(modBits, n);
   uint64_t m1 = check_exponent_u64(eBits, e);
   uint64_t m = m0 & m1;
-  return m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return m == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 static inline bool
@@ -388,16 +383,16 @@ load_skey(
   uint64_t *skey
 )
 {
-  uint32_t dbLen = (dBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t dbLen = (dBits - 1U) / 8U + 1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
   uint32_t pkeyLen = nLen + nLen + eLen;
   uint64_t *pkey = skey;
   uint64_t *d = skey + pkeyLen;
   bool b = load_pkey(modBits, eBits, nb, eb, pkey);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(dbLen, db, d);
   uint64_t m1 = check_exponent_u64(dBits, d);
-  return b && m1 == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  return b && m1 == 0xFFFFFFFFFFFFFFFFULL;
 }
 
 /**
@@ -435,45 +430,36 @@ Hacl_RSAPSS_rsapss_sign(
 {
   uint32_t hLen = hash_len(a);
   bool
-  b =
-    saltLen
-    <= (uint32_t)0xffffffffU - hLen - (uint32_t)8U
-    &&
-      saltLen
-      + hLen
-      + (uint32_t)2U
-      <= (modBits - (uint32_t)1U - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  b = saltLen <= 0xffffffffU - hLen - 8U && saltLen + hLen + 2U <= (modBits - 1U - 1U) / 8U + 1U;
   if (b)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
     uint64_t *m = (uint64_t *)alloca(nLen * sizeof (uint64_t));
     memset(m, 0U, nLen * sizeof (uint64_t));
-    uint32_t emBits = modBits - (uint32_t)1U;
-    uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+    uint32_t emBits = modBits - 1U;
+    uint32_t emLen = (emBits - 1U) / 8U + 1U;
     KRML_CHECK_SIZE(sizeof (uint8_t), emLen);
     uint8_t *em = (uint8_t *)alloca(emLen * sizeof (uint8_t));
     memset(em, 0U, emLen * sizeof (uint8_t));
     pss_encode(a, saltLen, salt, msgLen, msg, emBits, em);
     Hacl_Bignum_Convert_bn_from_bytes_be_uint64(emLen, em, m);
-    uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t k = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+    uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
+    uint32_t k = (modBits - 1U) / 8U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen1);
     uint64_t *s = (uint64_t *)alloca(nLen1 * sizeof (uint64_t));
     memset(s, 0U, nLen1 * sizeof (uint64_t));
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen1);
     uint64_t *m_ = (uint64_t *)alloca(nLen1 * sizeof (uint64_t));
     memset(m_, 0U, nLen1 * sizeof (uint64_t));
-    uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen2 = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
     uint64_t *n = skey;
     uint64_t *r2 = skey + nLen2;
     uint64_t *e = skey + nLen2 + nLen2;
     uint64_t *d = skey + nLen2 + nLen2 + eLen;
     uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((modBits - (uint32_t)1U)
-      / (uint32_t)64U
-      + (uint32_t)1U,
+    Hacl_Bignum_Exponentiation_bn_mod_exp_consttime_precomp_u64((modBits - 1U) / 64U + 1U,
       n,
       mu,
       r2,
@@ -482,9 +468,7 @@ Hacl_RSAPSS_rsapss_sign(
       d,
       s);
     uint64_t mu0 = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - (uint32_t)1U)
-      / (uint32_t)64U
-      + (uint32_t)1U,
+    Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - 1U) / 64U + 1U,
       n,
       mu0,
       r2,
@@ -492,22 +476,22 @@ Hacl_RSAPSS_rsapss_sign(
       eBits,
       e,
       m_);
-    uint64_t mask = (uint64_t)0xFFFFFFFFFFFFFFFFU;
-    for (uint32_t i = (uint32_t)0U; i < nLen2; i++)
+    uint64_t mask = 0xFFFFFFFFFFFFFFFFULL;
+    for (uint32_t i = 0U; i < nLen2; i++)
     {
       uint64_t uu____0 = FStar_UInt64_eq_mask(m[i], m_[i]);
       mask = uu____0 & mask;
     }
     uint64_t mask1 = mask;
     uint64_t eq_m = mask1;
-    for (uint32_t i = (uint32_t)0U; i < nLen2; i++)
+    for (uint32_t i = 0U; i < nLen2; i++)
     {
       uint64_t *os = s;
       uint64_t x = s[i];
       uint64_t x0 = eq_m & x;
       os[i] = x0;
     }
-    bool eq_b = eq_m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+    bool eq_b = eq_m == 0xFFFFFFFFFFFFFFFFULL;
     Hacl_Bignum_Convert_bn_to_bytes_be_uint64(k, s, sgnt);
     bool eq_b0 = eq_b;
     return eq_b0;
@@ -547,42 +531,36 @@ Hacl_RSAPSS_rsapss_verify(
 )
 {
   uint32_t hLen = hash_len(a);
-  bool
-  b =
-    saltLen
-    <= (uint32_t)0xffffffffU - hLen - (uint32_t)8U
-    && sgntLen == (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+  bool b = saltLen <= 0xffffffffU - hLen - 8U && sgntLen == (modBits - 1U) / 8U + 1U;
   if (b)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen);
     uint64_t *m = (uint64_t *)alloca(nLen * sizeof (uint64_t));
     memset(m, 0U, nLen * sizeof (uint64_t));
-    uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t k = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+    uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
+    uint32_t k = (modBits - 1U) / 8U + 1U;
     KRML_CHECK_SIZE(sizeof (uint64_t), nLen1);
     uint64_t *s = (uint64_t *)alloca(nLen1 * sizeof (uint64_t));
     memset(s, 0U, nLen1 * sizeof (uint64_t));
     Hacl_Bignum_Convert_bn_from_bytes_be_uint64(k, sgnt, s);
-    uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+    uint32_t nLen2 = (modBits - 1U) / 64U + 1U;
     uint64_t *n = pkey;
     uint64_t *r2 = pkey + nLen2;
     uint64_t *e = pkey + nLen2 + nLen2;
-    uint64_t acc = (uint64_t)0U;
-    for (uint32_t i = (uint32_t)0U; i < nLen2; i++)
+    uint64_t acc = 0ULL;
+    for (uint32_t i = 0U; i < nLen2; i++)
     {
       uint64_t beq = FStar_UInt64_eq_mask(s[i], n[i]);
       uint64_t blt = ~FStar_UInt64_gte_mask(s[i], n[i]);
-      acc = (beq & acc) | (~beq & ((blt & (uint64_t)0xFFFFFFFFFFFFFFFFU) | (~blt & (uint64_t)0U)));
+      acc = (beq & acc) | (~beq & ((blt & 0xFFFFFFFFFFFFFFFFULL) | (~blt & 0ULL)));
     }
     uint64_t mask = acc;
     bool res;
-    if (mask == (uint64_t)0xFFFFFFFFFFFFFFFFU)
+    if (mask == 0xFFFFFFFFFFFFFFFFULL)
     {
       uint64_t mu = Hacl_Bignum_ModInvLimb_mod_inv_uint64(n[0U]);
-      Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - (uint32_t)1U)
-        / (uint32_t)64U
-        + (uint32_t)1U,
+      Hacl_Bignum_Exponentiation_bn_mod_exp_vartime_precomp_u64((modBits - 1U) / 64U + 1U,
         n,
         mu,
         r2,
@@ -591,17 +569,17 @@ Hacl_RSAPSS_rsapss_verify(
         e,
         m);
       bool ite;
-      if (!((modBits - (uint32_t)1U) % (uint32_t)8U == (uint32_t)0U))
+      if (!((modBits - 1U) % 8U == 0U))
       {
         ite = true;
       }
       else
       {
-        uint32_t i = (modBits - (uint32_t)1U) / (uint32_t)64U;
-        uint32_t j = (modBits - (uint32_t)1U) % (uint32_t)64U;
+        uint32_t i = (modBits - 1U) / 64U;
+        uint32_t j = (modBits - 1U) % 64U;
         uint64_t tmp = m[i];
-        uint64_t get_bit = tmp >> j & (uint64_t)1U;
-        ite = get_bit == (uint64_t)0U;
+        uint64_t get_bit = tmp >> j & 1ULL;
+        ite = get_bit == 0ULL;
       }
       if (ite)
       {
@@ -620,8 +598,8 @@ Hacl_RSAPSS_rsapss_verify(
     bool b10 = b1;
     if (b10)
     {
-      uint32_t emBits = modBits - (uint32_t)1U;
-      uint32_t emLen = (emBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
+      uint32_t emBits = modBits - 1U;
+      uint32_t emLen = (emBits - 1U) / 8U + 1U;
       KRML_CHECK_SIZE(sizeof (uint8_t), emLen);
       uint8_t *em = (uint8_t *)alloca(emLen * sizeof (uint8_t));
       memset(em, 0U, emLen * sizeof (uint8_t));
@@ -649,15 +627,11 @@ uint64_t
 *Hacl_RSAPSS_new_rsapss_load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb)
 {
   bool ite;
-  if ((uint32_t)1U < modBits && (uint32_t)0U < eBits)
+  if (1U < modBits && 0U < eBits)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    ite =
-      nLen
-      <= (uint32_t)33554431U
-      && eLen <= (uint32_t)67108863U
-      && nLen + nLen <= (uint32_t)0xffffffffU - eLen;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
+    ite = nLen <= 33554431U && eLen <= 67108863U && nLen + nLen <= 0xffffffffU - eLen;
   }
   else
   {
@@ -667,8 +641,8 @@ uint64_t
   {
     return NULL;
   }
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
   uint32_t pkeyLen = nLen + nLen + eLen;
   KRML_CHECK_SIZE(sizeof (uint64_t), pkeyLen);
   uint64_t *pkey = (uint64_t *)KRML_HOST_CALLOC(pkeyLen, sizeof (uint64_t));
@@ -678,24 +652,19 @@ uint64_t
   }
   uint64_t *pkey1 = pkey;
   uint64_t *pkey2 = pkey1;
-  uint32_t nbLen = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t ebLen = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nbLen = (modBits - 1U) / 8U + 1U;
+  uint32_t ebLen = (eBits - 1U) / 8U + 1U;
+  uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
   uint64_t *n = pkey2;
   uint64_t *r2 = pkey2 + nLen1;
   uint64_t *e = pkey2 + nLen1 + nLen1;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen, nb, n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U)
-    / (uint32_t)64U
-    + (uint32_t)1U,
-    modBits - (uint32_t)1U,
-    n,
-    r2);
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen, eb, e);
   uint64_t m0 = check_modulus_u64(modBits, n);
   uint64_t m1 = check_exponent_u64(eBits, e);
   uint64_t m = m0 & m1;
-  bool b = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool b = m == 0xFFFFFFFFFFFFFFFFULL;
   if (b)
   {
     return pkey2;
@@ -727,27 +696,23 @@ uint64_t
 )
 {
   bool ite0;
-  if ((uint32_t)1U < modBits && (uint32_t)0U < eBits)
+  if (1U < modBits && 0U < eBits)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    ite0 =
-      nLen
-      <= (uint32_t)33554431U
-      && eLen <= (uint32_t)67108863U
-      && nLen + nLen <= (uint32_t)0xffffffffU - eLen;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
+    ite0 = nLen <= 33554431U && eLen <= 67108863U && nLen + nLen <= 0xffffffffU - eLen;
   }
   else
   {
     ite0 = false;
   }
   bool ite;
-  if (ite0 && (uint32_t)0U < dBits)
+  if (ite0 && 0U < dBits)
   {
-    uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    uint32_t dLen = (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-    ite = dLen <= (uint32_t)67108863U && (uint32_t)2U * nLen <= (uint32_t)0xffffffffU - eLen - dLen;
+    uint32_t nLen = (modBits - 1U) / 64U + 1U;
+    uint32_t eLen = (eBits - 1U) / 64U + 1U;
+    uint32_t dLen = (dBits - 1U) / 64U + 1U;
+    ite = dLen <= 67108863U && 2U * nLen <= 0xffffffffU - eLen - dLen;
   }
   else
   {
@@ -757,9 +722,9 @@ uint64_t
   {
     return NULL;
   }
-  uint32_t nLen = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t dLen = (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nLen = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen = (eBits - 1U) / 64U + 1U;
+  uint32_t dLen = (dBits - 1U) / 64U + 1U;
   uint32_t skeyLen = nLen + nLen + eLen + dLen;
   KRML_CHECK_SIZE(sizeof (uint64_t), skeyLen);
   uint64_t *skey = (uint64_t *)KRML_HOST_CALLOC(skeyLen, sizeof (uint64_t));
@@ -769,33 +734,28 @@ uint64_t
   }
   uint64_t *skey1 = skey;
   uint64_t *skey2 = skey1;
-  uint32_t dbLen = (dBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen1 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
-  uint32_t eLen1 = (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t dbLen = (dBits - 1U) / 8U + 1U;
+  uint32_t nLen1 = (modBits - 1U) / 64U + 1U;
+  uint32_t eLen1 = (eBits - 1U) / 64U + 1U;
   uint32_t pkeyLen = nLen1 + nLen1 + eLen1;
   uint64_t *pkey = skey2;
   uint64_t *d = skey2 + pkeyLen;
-  uint32_t nbLen1 = (modBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t ebLen1 = (eBits - (uint32_t)1U) / (uint32_t)8U + (uint32_t)1U;
-  uint32_t nLen2 = (modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U;
+  uint32_t nbLen1 = (modBits - 1U) / 8U + 1U;
+  uint32_t ebLen1 = (eBits - 1U) / 8U + 1U;
+  uint32_t nLen2 = (modBits - 1U) / 64U + 1U;
   uint64_t *n = pkey;
   uint64_t *r2 = pkey + nLen2;
   uint64_t *e = pkey + nLen2 + nLen2;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(nbLen1, nb, n);
-  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - (uint32_t)1U)
-    / (uint32_t)64U
-    + (uint32_t)1U,
-    modBits - (uint32_t)1U,
-    n,
-    r2);
+  Hacl_Bignum_Montgomery_bn_precomp_r2_mod_n_u64((modBits - 1U) / 64U + 1U, modBits - 1U, n, r2);
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(ebLen1, eb, e);
   uint64_t m0 = check_modulus_u64(modBits, n);
   uint64_t m10 = check_exponent_u64(eBits, e);
   uint64_t m = m0 & m10;
-  bool b = m == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool b = m == 0xFFFFFFFFFFFFFFFFULL;
   Hacl_Bignum_Convert_bn_from_bytes_be_uint64(dbLen, db, d);
   uint64_t m1 = check_exponent_u64(dBits, d);
-  bool b0 = b && m1 == (uint64_t)0xFFFFFFFFFFFFFFFFU;
+  bool b0 = b && m1 == 0xFFFFFFFFFFFFFFFFULL;
   if (b0)
   {
     return skey2;
@@ -842,23 +802,17 @@ Hacl_RSAPSS_rsapss_skey_sign(
 )
 {
   KRML_CHECK_SIZE(sizeof (uint64_t),
-    (uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U
-    + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U);
+    2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U + (dBits - 1U) / 64U + 1U);
   uint64_t
   *skey =
-    (uint64_t *)alloca(((uint32_t)2U
-      * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-      + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U
-      + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
+    (uint64_t *)alloca((2U
+      * ((modBits - 1U) / 64U + 1U)
+      + (eBits - 1U) / 64U + 1U
+      + (dBits - 1U) / 64U + 1U)
       * sizeof (uint64_t));
   memset(skey,
     0U,
-    ((uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U
-    + (dBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
+    (2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U + (dBits - 1U) / 64U + 1U)
     * sizeof (uint64_t));
   bool b = load_skey(modBits, eBits, dBits, nb, eb, db, skey);
   if (b)
@@ -911,22 +865,14 @@ Hacl_RSAPSS_rsapss_pkey_verify(
   uint8_t *msg
 )
 {
-  KRML_CHECK_SIZE(sizeof (uint64_t),
-    (uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U);
+  KRML_CHECK_SIZE(sizeof (uint64_t), 2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U);
   uint64_t
   *pkey =
-    (uint64_t *)alloca(((uint32_t)2U
-      * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-      + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
+    (uint64_t *)alloca((2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U)
       * sizeof (uint64_t));
   memset(pkey,
     0U,
-    ((uint32_t)2U
-    * ((modBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    + (eBits - (uint32_t)1U) / (uint32_t)64U + (uint32_t)1U)
-    * sizeof (uint64_t));
+    (2U * ((modBits - 1U) / 64U + 1U) + (eBits - 1U) / 64U + 1U) * sizeof (uint64_t));
   bool b = load_pkey(modBits, eBits, nb, eb, pkey);
   if (b)
   {
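The RSAPSS hunks above repeatedly inline the same size arithmetic, now written without redundant (uint32_t) casts: a bit length b occupies (b - 1U) / 8U + 1U bytes and (b - 1U) / 64U + 1U 64-bit limbs, and a secret-key buffer holds 2*nLen + eLen + dLen limbs. The standalone sketch below merely reproduces that arithmetic for a typical 2048-bit key; the helper names are illustrative and not part of the patch.

/* Sketch only: the byte/limb-count idiom used throughout the RSAPSS code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t limbs_of_bits(uint32_t bits) { return (bits - 1U) / 64U + 1U; }
static uint32_t bytes_of_bits(uint32_t bits) { return (bits - 1U) / 8U + 1U; }

int main(void)
{
  uint32_t modBits = 2048U, eBits = 17U, dBits = 2048U;
  uint32_t nLen = limbs_of_bits(modBits);      /* 32 limbs */
  uint32_t eLen = limbs_of_bits(eBits);        /* 1 limb */
  uint32_t dLen = limbs_of_bits(dBits);        /* 32 limbs */
  uint32_t k = bytes_of_bits(modBits);         /* 256-byte signature */
  uint32_t skeyLen = 2U * nLen + eLen + dLen;  /* 97 limbs, as in load_skey */
  printf("nLen=%" PRIu32 " eLen=%" PRIu32 " dLen=%" PRIu32 " k=%" PRIu32
    " skeyLen=%" PRIu32 "\n", nLen, eLen, dLen, k, skeyLen);
  return 0;
}
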
diff --git a/src/msvc/Hacl_SHA2_Vec128.c b/src/msvc/Hacl_SHA2_Vec128.c
index e1b6e304..02af75b1 100644
--- a/src/msvc/Hacl_SHA2_Vec128.c
+++ b/src/msvc/Hacl_SHA2_Vec128.c
@@ -32,21 +32,21 @@
 static inline void sha224_init4(Lib_IntVector_Intrinsics_vec128 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
-    uint32_t hi = Hacl_Impl_SHA2_Generic_h224[i];
+    uint32_t hi = Hacl_Hash_SHA2_h224[i];
     Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_load32(hi);
     os[i] = x;);
 }
 
 static inline void
-sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash)
+sha224_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash)
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec128));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -55,18 +55,18 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[1U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)16U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)16U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)16U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)16U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)48U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)48U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)48U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)48U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 16U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 16U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 16U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 16U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 48U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 48U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 48U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 48U);
   Lib_IntVector_Intrinsics_vec128 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec128 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec128 v20 = ws[2U];
@@ -196,14 +196,14 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Hash_SHA2_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec128 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec128 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec128 b0 = hash[1U];
@@ -218,10 +218,10 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       t1 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(h02,
                 Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_lognot(e0), g0))),
             k_e_t),
@@ -229,10 +229,10 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       Lib_IntVector_Intrinsics_vec128
       t2 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, b0),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, c0),
               Lib_IntVector_Intrinsics_vec128_and(b0, c0))));
@@ -252,30 +252,30 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec128 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec128
         s1 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec128
         s0 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(s1,
                 t7),
@@ -283,9 +283,9 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
     Lib_IntVector_Intrinsics_vec128
     x = Lib_IntVector_Intrinsics_vec128_add32(hash[i], hash_old[i]);
@@ -295,22 +295,22 @@ sha224_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
 static inline void
 sha224_update_nblocks4(
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec128 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
-    Hacl_Impl_SHA2_Types_uint8_4p
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
+    Hacl_Hash_SHA2_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha224_update4(mb, st);
   }
@@ -320,69 +320,69 @@ static inline void
 sha224_update_last4(
   uint64_t totlen,
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec128 *hash
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[512U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)64U;
+  uint8_t *last11 = last3 + 64U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } };
-  Hacl_Impl_SHA2_Types_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
-  Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
-  Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
+  Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
+  Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst;
+  Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd;
   sha224_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha224_update4(last1, hash);
     return;
@@ -390,7 +390,7 @@ sha224_update_last4(
 }
 
 static inline void
-sha224_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Impl_SHA2_Types_uint8_4p h)
+sha224_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Hash_SHA2_uint8_4p h)
 {
   uint8_t hbuf[128U] = { 0U };
   Lib_IntVector_Intrinsics_vec128 v00 = st[0U];
@@ -458,18 +458,18 @@ sha224_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * (uint32_t)16U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * 16U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)28U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 28U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 28U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 28U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 28U * sizeof (uint8_t));
 }
 
 void
@@ -485,16 +485,16 @@ Hacl_SHA2_Vec128_sha224_4(
   uint8_t *input3
 )
 {
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U };
   sha224_init4(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha224_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
@@ -503,7 +503,7 @@ Hacl_SHA2_Vec128_sha224_4(
   uint8_t *bl1 = b1 + input_len - rem1;
   uint8_t *bl2 = b2 + input_len - rem1;
   uint8_t *bl3 = b3 + input_len - rem1;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
   sha224_update_last4(len_, rem, lb, st);
   sha224_finish4(st, rb);
@@ -512,21 +512,21 @@ Hacl_SHA2_Vec128_sha224_4(
 static inline void sha256_init4(Lib_IntVector_Intrinsics_vec128 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
-    uint32_t hi = Hacl_Impl_SHA2_Generic_h256[i];
+    uint32_t hi = Hacl_Hash_SHA2_h256[i];
     Lib_IntVector_Intrinsics_vec128 x = Lib_IntVector_Intrinsics_vec128_load32(hi);
     os[i] = x;);
 }
 
 static inline void
-sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash)
+sha256_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec128 *hash)
 {
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 hash_old[8U] KRML_POST_ALIGN(16) = { 0U };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 ws[16U] KRML_POST_ALIGN(16) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec128));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec128));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -535,18 +535,18 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[1U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)16U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)16U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)16U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)16U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + (uint32_t)48U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + (uint32_t)48U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + (uint32_t)48U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + (uint32_t)48U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 16U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 16U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 16U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 16U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec128_load32_be(b00 + 48U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec128_load32_be(b10 + 48U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec128_load32_be(b2 + 48U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec128_load32_be(b3 + 48U);
   Lib_IntVector_Intrinsics_vec128 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec128 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec128 v20 = ws[2U];
@@ -676,14 +676,14 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Hash_SHA2_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec128 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec128 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec128 b0 = hash[1U];
@@ -698,10 +698,10 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       t1 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(h02,
                 Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec128_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec128_and(Lib_IntVector_Intrinsics_vec128_lognot(e0), g0))),
             k_e_t),
@@ -709,10 +709,10 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       Lib_IntVector_Intrinsics_vec128
       t2 =
         Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec128_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, b0),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_and(a0, c0),
               Lib_IntVector_Intrinsics_vec128_and(b0, c0))));
@@ -732,30 +732,30 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec128 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec128 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec128 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec128
         s1 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec128
         s0 =
           Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec128_xor(Lib_IntVector_Intrinsics_vec128_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec128_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(Lib_IntVector_Intrinsics_vec128_add32(s1,
                 t7),
@@ -763,9 +763,9 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec128 *os = hash;
     Lib_IntVector_Intrinsics_vec128
     x = Lib_IntVector_Intrinsics_vec128_add32(hash[i], hash_old[i]);
@@ -775,22 +775,22 @@ sha256_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec128
 static inline void
 sha256_update_nblocks4(
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec128 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
-    Hacl_Impl_SHA2_Types_uint8_4p
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
+    Hacl_Hash_SHA2_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha256_update4(mb, st);
   }
@@ -800,69 +800,69 @@ static inline void
 sha256_update_last4(
   uint64_t totlen,
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec128 *hash
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[512U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)64U;
+  uint8_t *last11 = last3 + 64U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } };
-  Hacl_Impl_SHA2_Types_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
-  Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
-  Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
+  Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
+  Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst;
+  Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd;
   sha256_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha256_update4(last1, hash);
     return;
@@ -870,7 +870,7 @@ sha256_update_last4(
 }
 
 static inline void
-sha256_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Impl_SHA2_Types_uint8_4p h)
+sha256_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Hash_SHA2_uint8_4p h)
 {
   uint8_t hbuf[128U] = { 0U };
   Lib_IntVector_Intrinsics_vec128 v00 = st[0U];
@@ -938,18 +938,18 @@ sha256_finish4(Lib_IntVector_Intrinsics_vec128 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * (uint32_t)16U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec128_store32_be(hbuf + i * 16U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 32U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 32U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 32U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 32U * sizeof (uint8_t));
 }
 
 void
@@ -965,16 +965,16 @@ Hacl_SHA2_Vec128_sha256_4(
   uint8_t *input3
 )
 {
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 st[8U] KRML_POST_ALIGN(16) = { 0U };
   sha256_init4(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha256_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
@@ -983,7 +983,7 @@ Hacl_SHA2_Vec128_sha256_4(
   uint8_t *bl1 = b1 + input_len - rem1;
   uint8_t *bl2 = b2 + input_len - rem1;
   uint8_t *bl3 = b3 + input_len - rem1;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
   sha256_update_last4(len_, rem, lb, st);
   sha256_finish4(st, rb);
diff --git a/src/msvc/Hacl_SHA2_Vec256.c b/src/msvc/Hacl_SHA2_Vec256.c
index b74ce621..c34767f5 100644
--- a/src/msvc/Hacl_SHA2_Vec256.c
+++ b/src/msvc/Hacl_SHA2_Vec256.c
@@ -33,21 +33,21 @@
 static inline void sha224_init8(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
-    uint32_t hi = Hacl_Impl_SHA2_Generic_h224[i];
+    uint32_t hi = Hacl_Hash_SHA2_h224[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load32(hi);
     os[i] = x;);
 }
 
 static inline void
-sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256 *hash)
+sha224_update8(Hacl_Hash_SHA2_uint8_8p b, Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = b.snd.snd.snd.snd.snd.fst;
@@ -64,14 +64,14 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[5U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5);
   ws[6U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6);
   ws[7U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + (uint32_t)32U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + (uint32_t)32U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + (uint32_t)32U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + (uint32_t)32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + 32U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + 32U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + 32U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + 32U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -281,14 +281,14 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Hash_SHA2_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -303,10 +303,10 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -314,10 +314,10 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -337,30 +337,30 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(s1,
                 t7),
@@ -368,9 +368,9 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add32(hash[i], hash_old[i]);
@@ -380,12 +380,12 @@ sha224_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
 static inline void
 sha224_update_nblocks8(
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_8p b,
+  Hacl_Hash_SHA2_uint8_8p b,
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
     uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -395,15 +395,15 @@ sha224_update_nblocks8(
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
-    uint8_t *bl4 = b4 + i * (uint32_t)64U;
-    uint8_t *bl5 = b5 + i * (uint32_t)64U;
-    uint8_t *bl6 = b6 + i * (uint32_t)64U;
-    uint8_t *bl7 = b7 + i * (uint32_t)64U;
-    Hacl_Impl_SHA2_Types_uint8_8p
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
+    uint8_t *bl4 = b4 + i * 64U;
+    uint8_t *bl5 = b5 + i * 64U;
+    uint8_t *bl6 = b6 + i * 64U;
+    uint8_t *bl7 = b7 + i * 64U;
+    Hacl_Hash_SHA2_uint8_8p
     mb =
       {
         .fst = bl0,
@@ -426,23 +426,23 @@ static inline void
 sha224_update_last8(
   uint64_t totlen,
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_8p b,
+  Hacl_Hash_SHA2_uint8_8p b,
   Lib_IntVector_Intrinsics_vec256 *hash
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -453,70 +453,70 @@ sha224_update_last8(
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
-  uint8_t *last4 = last + (uint32_t)512U;
-  uint8_t *last5 = last + (uint32_t)640U;
-  uint8_t *last6 = last + (uint32_t)768U;
-  uint8_t *last7 = last + (uint32_t)896U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
+  uint8_t *last4 = last + 512U;
+  uint8_t *last5 = last + 640U;
+  uint8_t *last6 = last + 768U;
+  uint8_t *last7 = last + 896U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last013 = last3;
-  uint8_t *last113 = last3 + (uint32_t)64U;
+  uint8_t *last113 = last3 + 64U;
   uint8_t *l30 = last013;
   uint8_t *l31 = last113;
   memcpy(last4, b4, len * sizeof (uint8_t));
-  last4[len] = (uint8_t)0x80U;
-  memcpy(last4 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last4[len] = 0x80U;
+  memcpy(last4 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last014 = last4;
-  uint8_t *last114 = last4 + (uint32_t)64U;
+  uint8_t *last114 = last4 + 64U;
   uint8_t *l40 = last014;
   uint8_t *l41 = last114;
   memcpy(last5, b5, len * sizeof (uint8_t));
-  last5[len] = (uint8_t)0x80U;
-  memcpy(last5 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last5[len] = 0x80U;
+  memcpy(last5 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last015 = last5;
-  uint8_t *last115 = last5 + (uint32_t)64U;
+  uint8_t *last115 = last5 + 64U;
   uint8_t *l50 = last015;
   uint8_t *l51 = last115;
   memcpy(last6, b6, len * sizeof (uint8_t));
-  last6[len] = (uint8_t)0x80U;
-  memcpy(last6 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last6[len] = 0x80U;
+  memcpy(last6 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last016 = last6;
-  uint8_t *last116 = last6 + (uint32_t)64U;
+  uint8_t *last116 = last6 + 64U;
   uint8_t *l60 = last016;
   uint8_t *l61 = last116;
   memcpy(last7, b7, len * sizeof (uint8_t));
-  last7[len] = (uint8_t)0x80U;
-  memcpy(last7 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last7[len] = 0x80U;
+  memcpy(last7 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last7;
-  uint8_t *last11 = last7 + (uint32_t)64U;
+  uint8_t *last11 = last7 + 64U;
   uint8_t *l70 = last01;
   uint8_t *l71 = last11;
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   mb0 =
     {
       .fst = l00,
@@ -531,7 +531,7 @@ sha224_update_last8(
         }
       }
     };
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   mb1 =
     {
       .fst = l01,
@@ -546,11 +546,11 @@ sha224_update_last8(
         }
       }
     };
-  Hacl_Impl_SHA2_Types_uint8_2x8p scrut = { .fst = mb0, .snd = mb1 };
-  Hacl_Impl_SHA2_Types_uint8_8p last0 = scrut.fst;
-  Hacl_Impl_SHA2_Types_uint8_8p last1 = scrut.snd;
+  Hacl_Hash_SHA2_uint8_2x8p scrut = { .fst = mb0, .snd = mb1 };
+  Hacl_Hash_SHA2_uint8_8p last0 = scrut.fst;
+  Hacl_Hash_SHA2_uint8_8p last1 = scrut.snd;
   sha224_update8(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha224_update8(last1, hash);
     return;
@@ -558,7 +558,7 @@ sha224_update_last8(
 }
 
 static inline void
-sha224_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8p h)
+sha224_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_8p h)
 {
   uint8_t hbuf[256U] = { 0U };
   Lib_IntVector_Intrinsics_vec256 v0 = st[0U];
@@ -662,10 +662,10 @@ sha224_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   st[6U] = st6_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * 32U, st[i]););
   uint8_t *b7 = h.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = h.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = h.snd.snd.snd.snd.snd.fst;
@@ -674,14 +674,14 @@ sha224_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b4, hbuf + (uint32_t)128U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b5, hbuf + (uint32_t)160U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b6, hbuf + (uint32_t)192U, (uint32_t)28U * sizeof (uint8_t));
-  memcpy(b7, hbuf + (uint32_t)224U, (uint32_t)28U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 28U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 28U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 28U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 28U * sizeof (uint8_t));
+  memcpy(b4, hbuf + 128U, 28U * sizeof (uint8_t));
+  memcpy(b5, hbuf + 160U, 28U * sizeof (uint8_t));
+  memcpy(b6, hbuf + 192U, 28U * sizeof (uint8_t));
+  memcpy(b7, hbuf + 224U, 28U * sizeof (uint8_t));
 }
 
 void
@@ -705,7 +705,7 @@ Hacl_SHA2_Vec256_sha224_8(
   uint8_t *input7
 )
 {
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   ib =
     {
       .fst = input0,
@@ -723,7 +723,7 @@ Hacl_SHA2_Vec256_sha224_8(
         }
       }
     };
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   rb =
     {
       .fst = dst0,
@@ -740,10 +740,10 @@ Hacl_SHA2_Vec256_sha224_8(
     };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha224_init8(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha224_update_nblocks8(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b7 = ib.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = ib.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = ib.snd.snd.snd.snd.snd.fst;
@@ -760,7 +760,7 @@ Hacl_SHA2_Vec256_sha224_8(
   uint8_t *bl5 = b5 + input_len - rem1;
   uint8_t *bl6 = b6 + input_len - rem1;
   uint8_t *bl7 = b7 + input_len - rem1;
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   lb =
     {
       .fst = bl0,
@@ -782,21 +782,21 @@ Hacl_SHA2_Vec256_sha224_8(
 static inline void sha256_init8(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
-    uint32_t hi = Hacl_Impl_SHA2_Generic_h256[i];
+    uint32_t hi = Hacl_Hash_SHA2_h256[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load32(hi);
     os[i] = x;);
 }
 
 static inline void
-sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256 *hash)
+sha256_update8(Hacl_Hash_SHA2_uint8_8p b, Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = b.snd.snd.snd.snd.snd.fst;
@@ -813,14 +813,14 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[5U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5);
   ws[6U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6);
   ws[7U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + (uint32_t)32U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + (uint32_t)32U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + (uint32_t)32U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + (uint32_t)32U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + (uint32_t)32U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + (uint32_t)32U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + (uint32_t)32U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + (uint32_t)32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load32_be(b00 + 32U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load32_be(b10 + 32U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load32_be(b2 + 32U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load32_be(b3 + 32U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load32_be(b4 + 32U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load32_be(b5 + 32U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load32_be(b6 + 32U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load32_be(b7 + 32U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -1030,14 +1030,14 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR4(i0,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint32_t k_t = Hacl_Hash_SHA2_k224_256[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -1052,10 +1052,10 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                    (uint32_t)6U),
+                    6U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(e0,
-                      (uint32_t)11U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, (uint32_t)25U)))),
+                      11U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right32(e0, 25U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -1063,10 +1063,10 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-              (uint32_t)2U),
+              2U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(a0,
-                (uint32_t)13U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, (uint32_t)22U))),
+                13U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right32(a0, 22U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -1086,30 +1086,30 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)3U)
+    if (i0 < 3U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-              (uint32_t)17U),
+              17U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t2,
-                (uint32_t)19U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, (uint32_t)10U)));
+                19U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t2, 10U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-              (uint32_t)7U),
+              7U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right32(t15,
-                (uint32_t)18U),
-              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, (uint32_t)3U)));
+                18U),
+              Lib_IntVector_Intrinsics_vec256_shift_right32(t15, 3U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(Lib_IntVector_Intrinsics_vec256_add32(s1,
                 t7),
@@ -1117,9 +1117,9 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add32(hash[i], hash_old[i]);
@@ -1129,12 +1129,12 @@ sha256_update8(Hacl_Impl_SHA2_Types_uint8_8p b, Lib_IntVector_Intrinsics_vec256
 static inline void
 sha256_update_nblocks8(
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_8p b,
+  Hacl_Hash_SHA2_uint8_8p b,
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)64U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 64U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
     uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -1144,15 +1144,15 @@ sha256_update_nblocks8(
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)64U;
-    uint8_t *bl1 = b1 + i * (uint32_t)64U;
-    uint8_t *bl2 = b2 + i * (uint32_t)64U;
-    uint8_t *bl3 = b3 + i * (uint32_t)64U;
-    uint8_t *bl4 = b4 + i * (uint32_t)64U;
-    uint8_t *bl5 = b5 + i * (uint32_t)64U;
-    uint8_t *bl6 = b6 + i * (uint32_t)64U;
-    uint8_t *bl7 = b7 + i * (uint32_t)64U;
-    Hacl_Impl_SHA2_Types_uint8_8p
+    uint8_t *bl0 = b0 + i * 64U;
+    uint8_t *bl1 = b1 + i * 64U;
+    uint8_t *bl2 = b2 + i * 64U;
+    uint8_t *bl3 = b3 + i * 64U;
+    uint8_t *bl4 = b4 + i * 64U;
+    uint8_t *bl5 = b5 + i * 64U;
+    uint8_t *bl6 = b6 + i * 64U;
+    uint8_t *bl7 = b7 + i * 64U;
+    Hacl_Hash_SHA2_uint8_8p
     mb =
       {
         .fst = bl0,
@@ -1175,23 +1175,23 @@ static inline void
 sha256_update_last8(
   uint64_t totlen,
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_8p b,
+  Hacl_Hash_SHA2_uint8_8p b,
   Lib_IntVector_Intrinsics_vec256 *hash
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U)
+  if (len + 8U + 1U <= 64U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)64U;
+  uint32_t fin = blocks * 64U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[8U] = { 0U };
-  uint64_t total_len_bits = totlen << (uint32_t)3U;
+  uint64_t total_len_bits = totlen << 3U;
   store64_be(totlen_buf, total_len_bits);
   uint8_t *b7 = b.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = b.snd.snd.snd.snd.snd.snd.fst;
@@ -1202,70 +1202,70 @@ sha256_update_last8(
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)128U;
-  uint8_t *last2 = last + (uint32_t)256U;
-  uint8_t *last3 = last + (uint32_t)384U;
-  uint8_t *last4 = last + (uint32_t)512U;
-  uint8_t *last5 = last + (uint32_t)640U;
-  uint8_t *last6 = last + (uint32_t)768U;
-  uint8_t *last7 = last + (uint32_t)896U;
+  uint8_t *last10 = last + 128U;
+  uint8_t *last2 = last + 256U;
+  uint8_t *last3 = last + 384U;
+  uint8_t *last4 = last + 512U;
+  uint8_t *last5 = last + 640U;
+  uint8_t *last6 = last + 768U;
+  uint8_t *last7 = last + 896U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)64U;
+  uint8_t *last110 = last00 + 64U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)64U;
+  uint8_t *last111 = last10 + 64U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)64U;
+  uint8_t *last112 = last2 + 64U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last013 = last3;
-  uint8_t *last113 = last3 + (uint32_t)64U;
+  uint8_t *last113 = last3 + 64U;
   uint8_t *l30 = last013;
   uint8_t *l31 = last113;
   memcpy(last4, b4, len * sizeof (uint8_t));
-  last4[len] = (uint8_t)0x80U;
-  memcpy(last4 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last4[len] = 0x80U;
+  memcpy(last4 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last014 = last4;
-  uint8_t *last114 = last4 + (uint32_t)64U;
+  uint8_t *last114 = last4 + 64U;
   uint8_t *l40 = last014;
   uint8_t *l41 = last114;
   memcpy(last5, b5, len * sizeof (uint8_t));
-  last5[len] = (uint8_t)0x80U;
-  memcpy(last5 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last5[len] = 0x80U;
+  memcpy(last5 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last015 = last5;
-  uint8_t *last115 = last5 + (uint32_t)64U;
+  uint8_t *last115 = last5 + 64U;
   uint8_t *l50 = last015;
   uint8_t *l51 = last115;
   memcpy(last6, b6, len * sizeof (uint8_t));
-  last6[len] = (uint8_t)0x80U;
-  memcpy(last6 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last6[len] = 0x80U;
+  memcpy(last6 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last016 = last6;
-  uint8_t *last116 = last6 + (uint32_t)64U;
+  uint8_t *last116 = last6 + 64U;
   uint8_t *l60 = last016;
   uint8_t *l61 = last116;
   memcpy(last7, b7, len * sizeof (uint8_t));
-  last7[len] = (uint8_t)0x80U;
-  memcpy(last7 + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t));
+  last7[len] = 0x80U;
+  memcpy(last7 + fin - 8U, totlen_buf, 8U * sizeof (uint8_t));
   uint8_t *last01 = last7;
-  uint8_t *last11 = last7 + (uint32_t)64U;
+  uint8_t *last11 = last7 + 64U;
   uint8_t *l70 = last01;
   uint8_t *l71 = last11;
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   mb0 =
     {
       .fst = l00,
@@ -1280,7 +1280,7 @@ sha256_update_last8(
         }
       }
     };
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   mb1 =
     {
       .fst = l01,
@@ -1295,11 +1295,11 @@ sha256_update_last8(
         }
       }
     };
-  Hacl_Impl_SHA2_Types_uint8_2x8p scrut = { .fst = mb0, .snd = mb1 };
-  Hacl_Impl_SHA2_Types_uint8_8p last0 = scrut.fst;
-  Hacl_Impl_SHA2_Types_uint8_8p last1 = scrut.snd;
+  Hacl_Hash_SHA2_uint8_2x8p scrut = { .fst = mb0, .snd = mb1 };
+  Hacl_Hash_SHA2_uint8_8p last0 = scrut.fst;
+  Hacl_Hash_SHA2_uint8_8p last1 = scrut.snd;
   sha256_update8(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha256_update8(last1, hash);
     return;
@@ -1307,7 +1307,7 @@ sha256_update_last8(
 }
 
 static inline void
-sha256_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8p h)
+sha256_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_8p h)
 {
   uint8_t hbuf[256U] = { 0U };
   Lib_IntVector_Intrinsics_vec256 v0 = st[0U];
@@ -1411,10 +1411,10 @@ sha256_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   st[6U] = st6_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store32_be(hbuf + i * 32U, st[i]););
   uint8_t *b7 = h.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = h.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = h.snd.snd.snd.snd.snd.fst;
@@ -1423,14 +1423,14 @@ sha256_finish8(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_8
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)32U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)64U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)96U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b4, hbuf + (uint32_t)128U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b5, hbuf + (uint32_t)160U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b6, hbuf + (uint32_t)192U, (uint32_t)32U * sizeof (uint8_t));
-  memcpy(b7, hbuf + (uint32_t)224U, (uint32_t)32U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 32U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 32U, 32U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 64U, 32U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 96U, 32U * sizeof (uint8_t));
+  memcpy(b4, hbuf + 128U, 32U * sizeof (uint8_t));
+  memcpy(b5, hbuf + 160U, 32U * sizeof (uint8_t));
+  memcpy(b6, hbuf + 192U, 32U * sizeof (uint8_t));
+  memcpy(b7, hbuf + 224U, 32U * sizeof (uint8_t));
 }
 
 void
@@ -1454,7 +1454,7 @@ Hacl_SHA2_Vec256_sha256_8(
   uint8_t *input7
 )
 {
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   ib =
     {
       .fst = input0,
@@ -1472,7 +1472,7 @@ Hacl_SHA2_Vec256_sha256_8(
         }
       }
     };
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   rb =
     {
       .fst = dst0,
@@ -1489,10 +1489,10 @@ Hacl_SHA2_Vec256_sha256_8(
     };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha256_init8(st);
-  uint32_t rem = input_len % (uint32_t)64U;
+  uint32_t rem = input_len % 64U;
   uint64_t len_ = (uint64_t)input_len;
   sha256_update_nblocks8(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)64U;
+  uint32_t rem1 = input_len % 64U;
   uint8_t *b7 = ib.snd.snd.snd.snd.snd.snd.snd;
   uint8_t *b6 = ib.snd.snd.snd.snd.snd.snd.fst;
   uint8_t *b5 = ib.snd.snd.snd.snd.snd.fst;
@@ -1509,7 +1509,7 @@ Hacl_SHA2_Vec256_sha256_8(
   uint8_t *bl5 = b5 + input_len - rem1;
   uint8_t *bl6 = b6 + input_len - rem1;
   uint8_t *bl7 = b7 + input_len - rem1;
-  Hacl_Impl_SHA2_Types_uint8_8p
+  Hacl_Hash_SHA2_uint8_8p
   lb =
     {
       .fst = bl0,
@@ -1531,21 +1531,21 @@ Hacl_SHA2_Vec256_sha256_8(
 static inline void sha384_init4(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
-    uint64_t hi = Hacl_Impl_SHA2_Generic_h384[i];
+    uint64_t hi = Hacl_Hash_SHA2_h384[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load64(hi);
     os[i] = x;);
 }
 
 static inline void
-sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash)
+sha384_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -1554,18 +1554,18 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)32U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)32U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)32U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)32U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)64U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)64U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)64U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)64U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)96U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)96U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)96U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)96U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 32U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 32U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 32U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 64U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 64U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 64U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 64U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 96U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 96U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 96U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 96U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -1679,14 +1679,14 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR5(i0,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
+    0U,
+    5U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint64_t k_t = Hacl_Hash_SHA2_k384_512[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -1701,10 +1701,10 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                    (uint32_t)14U),
+                    14U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                      (uint32_t)18U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, (uint32_t)41U)))),
+                      18U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, 41U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -1712,10 +1712,10 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-              (uint32_t)28U),
+              28U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-                (uint32_t)34U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, (uint32_t)39U))),
+                34U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, 39U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -1735,30 +1735,30 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)4U)
+    if (i0 < 4U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-              (uint32_t)19U),
+              19U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-                (uint32_t)61U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, (uint32_t)6U)));
+                61U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, 6U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-              (uint32_t)1U),
+              1U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-                (uint32_t)8U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, (uint32_t)7U)));
+                8U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, 7U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(s1,
                 t7),
@@ -1766,9 +1766,9 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add64(hash[i], hash_old[i]);
@@ -1778,22 +1778,22 @@ sha384_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
 static inline void
 sha384_update_nblocks4(
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)128U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 128U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)128U;
-    uint8_t *bl1 = b1 + i * (uint32_t)128U;
-    uint8_t *bl2 = b2 + i * (uint32_t)128U;
-    uint8_t *bl3 = b3 + i * (uint32_t)128U;
-    Hacl_Impl_SHA2_Types_uint8_4p
+    uint8_t *bl0 = b0 + i * 128U;
+    uint8_t *bl1 = b1 + i * 128U;
+    uint8_t *bl2 = b2 + i * 128U;
+    uint8_t *bl3 = b3 + i * 128U;
+    Hacl_Hash_SHA2_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha384_update4(mb, st);
   }
@@ -1803,69 +1803,69 @@ static inline void
 sha384_update_last4(
   FStar_UInt128_uint128 totlen,
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec256 *hash
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+  if (len + 16U + 1U <= 128U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)128U;
+  uint32_t fin = blocks * 128U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[16U] = { 0U };
-  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, 3U);
   store128_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)256U;
-  uint8_t *last2 = last + (uint32_t)512U;
-  uint8_t *last3 = last + (uint32_t)768U;
+  uint8_t *last10 = last + 256U;
+  uint8_t *last2 = last + 512U;
+  uint8_t *last3 = last + 768U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)128U;
+  uint8_t *last110 = last00 + 128U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)128U;
+  uint8_t *last111 = last10 + 128U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)128U;
+  uint8_t *last112 = last2 + 128U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)128U;
+  uint8_t *last11 = last3 + 128U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } };
-  Hacl_Impl_SHA2_Types_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
-  Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
-  Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
+  Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
+  Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst;
+  Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd;
   sha384_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha384_update4(last1, hash);
     return;
@@ -1873,7 +1873,7 @@ sha384_update_last4(
 }
 
 static inline void
-sha384_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_4p h)
+sha384_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_4p h)
 {
   uint8_t hbuf[256U] = { 0U };
   Lib_IntVector_Intrinsics_vec256 v00 = st[0U];
@@ -1933,18 +1933,18 @@ sha384_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * 32U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)48U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)64U, (uint32_t)48U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)128U, (uint32_t)48U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)192U, (uint32_t)48U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 48U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 64U, 48U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 128U, 48U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 192U, 48U * sizeof (uint8_t));
 }
 
 void
@@ -1960,16 +1960,16 @@ Hacl_SHA2_Vec256_sha384_4(
   uint8_t *input3
 )
 {
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha384_init4(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
   sha384_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
@@ -1978,7 +1978,7 @@ Hacl_SHA2_Vec256_sha384_4(
   uint8_t *bl1 = b1 + input_len - rem1;
   uint8_t *bl2 = b2 + input_len - rem1;
   uint8_t *bl3 = b3 + input_len - rem1;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
   sha384_update_last4(len_, rem, lb, st);
   sha384_finish4(st, rb);
@@ -1987,21 +1987,21 @@ Hacl_SHA2_Vec256_sha384_4(
 static inline void sha512_init4(Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
-    uint64_t hi = Hacl_Impl_SHA2_Generic_h512[i];
+    uint64_t hi = Hacl_Hash_SHA2_h512[i];
     Lib_IntVector_Intrinsics_vec256 x = Lib_IntVector_Intrinsics_vec256_load64(hi);
     os[i] = x;);
 }
 
 static inline void
-sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash)
+sha512_update4(Hacl_Hash_SHA2_uint8_4p b, Lib_IntVector_Intrinsics_vec256 *hash)
 {
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 hash_old[8U] KRML_POST_ALIGN(32) = { 0U };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 ws[16U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(hash_old, hash, (uint32_t)8U * sizeof (Lib_IntVector_Intrinsics_vec256));
+  memcpy(hash_old, hash, 8U * sizeof (Lib_IntVector_Intrinsics_vec256));
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b10 = b.snd.fst;
@@ -2010,18 +2010,18 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[1U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10);
   ws[2U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2);
   ws[3U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3);
-  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)32U);
-  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)32U);
-  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)32U);
-  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)32U);
-  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)64U);
-  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)64U);
-  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)64U);
-  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)64U);
-  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + (uint32_t)96U);
-  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + (uint32_t)96U);
-  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + (uint32_t)96U);
-  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + (uint32_t)96U);
+  ws[4U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 32U);
+  ws[5U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 32U);
+  ws[6U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 32U);
+  ws[7U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 32U);
+  ws[8U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 64U);
+  ws[9U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 64U);
+  ws[10U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 64U);
+  ws[11U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 64U);
+  ws[12U] = Lib_IntVector_Intrinsics_vec256_load64_be(b00 + 96U);
+  ws[13U] = Lib_IntVector_Intrinsics_vec256_load64_be(b10 + 96U);
+  ws[14U] = Lib_IntVector_Intrinsics_vec256_load64_be(b2 + 96U);
+  ws[15U] = Lib_IntVector_Intrinsics_vec256_load64_be(b3 + 96U);
   Lib_IntVector_Intrinsics_vec256 v00 = ws[0U];
   Lib_IntVector_Intrinsics_vec256 v10 = ws[1U];
   Lib_IntVector_Intrinsics_vec256 v20 = ws[2U];
@@ -2135,14 +2135,14 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
   ws[14U] = ws14;
   ws[15U] = ws15;
   KRML_MAYBE_FOR5(i0,
-    (uint32_t)0U,
-    (uint32_t)5U,
-    (uint32_t)1U,
+    0U,
+    5U,
+    1U,
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      uint64_t k_t = Hacl_Impl_SHA2_Generic_k384_512[(uint32_t)16U * i0 + i];
+      0U,
+      16U,
+      1U,
+      uint64_t k_t = Hacl_Hash_SHA2_k384_512[16U * i0 + i];
       Lib_IntVector_Intrinsics_vec256 ws_t = ws[i];
       Lib_IntVector_Intrinsics_vec256 a0 = hash[0U];
       Lib_IntVector_Intrinsics_vec256 b0 = hash[1U];
@@ -2157,10 +2157,10 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       t1 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(h02,
                 Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                    (uint32_t)14U),
+                    14U),
                   Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(e0,
-                      (uint32_t)18U),
-                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, (uint32_t)41U)))),
+                      18U),
+                    Lib_IntVector_Intrinsics_vec256_rotate_right64(e0, 41U)))),
               Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(e0, f0),
                 Lib_IntVector_Intrinsics_vec256_and(Lib_IntVector_Intrinsics_vec256_lognot(e0), g0))),
             k_e_t),
@@ -2168,10 +2168,10 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       Lib_IntVector_Intrinsics_vec256
       t2 =
         Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-              (uint32_t)28U),
+              28U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(a0,
-                (uint32_t)34U),
-              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, (uint32_t)39U))),
+                34U),
+              Lib_IntVector_Intrinsics_vec256_rotate_right64(a0, 39U))),
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, b0),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_and(a0, c0),
               Lib_IntVector_Intrinsics_vec256_and(b0, c0))));
@@ -2191,30 +2191,30 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
       hash[5U] = f1;
       hash[6U] = g1;
       hash[7U] = h12;);
-    if (i0 < (uint32_t)4U)
+    if (i0 < 4U)
     {
       KRML_MAYBE_FOR16(i,
-        (uint32_t)0U,
-        (uint32_t)16U,
-        (uint32_t)1U,
+        0U,
+        16U,
+        1U,
         Lib_IntVector_Intrinsics_vec256 t16 = ws[i];
-        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U];
-        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U];
+        Lib_IntVector_Intrinsics_vec256 t15 = ws[(i + 1U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t7 = ws[(i + 9U) % 16U];
+        Lib_IntVector_Intrinsics_vec256 t2 = ws[(i + 14U) % 16U];
         Lib_IntVector_Intrinsics_vec256
         s1 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-              (uint32_t)19U),
+              19U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t2,
-                (uint32_t)61U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, (uint32_t)6U)));
+                61U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t2, 6U)));
         Lib_IntVector_Intrinsics_vec256
         s0 =
           Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-              (uint32_t)1U),
+              1U),
             Lib_IntVector_Intrinsics_vec256_xor(Lib_IntVector_Intrinsics_vec256_rotate_right64(t15,
-                (uint32_t)8U),
-              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, (uint32_t)7U)));
+                8U),
+              Lib_IntVector_Intrinsics_vec256_shift_right64(t15, 7U)));
         ws[i] =
           Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(Lib_IntVector_Intrinsics_vec256_add64(s1,
                 t7),
@@ -2222,9 +2222,9 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
             t16););
     });
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     Lib_IntVector_Intrinsics_vec256 *os = hash;
     Lib_IntVector_Intrinsics_vec256
     x = Lib_IntVector_Intrinsics_vec256_add64(hash[i], hash_old[i]);
@@ -2234,22 +2234,22 @@ sha512_update4(Hacl_Impl_SHA2_Types_uint8_4p b, Lib_IntVector_Intrinsics_vec256
 static inline void
 sha512_update_nblocks4(
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec256 *st
 )
 {
-  uint32_t blocks = len / (uint32_t)128U;
-  for (uint32_t i = (uint32_t)0U; i < blocks; i++)
+  uint32_t blocks = len / 128U;
+  for (uint32_t i = 0U; i < blocks; i++)
   {
     uint8_t *b3 = b.snd.snd.snd;
     uint8_t *b2 = b.snd.snd.fst;
     uint8_t *b1 = b.snd.fst;
     uint8_t *b0 = b.fst;
-    uint8_t *bl0 = b0 + i * (uint32_t)128U;
-    uint8_t *bl1 = b1 + i * (uint32_t)128U;
-    uint8_t *bl2 = b2 + i * (uint32_t)128U;
-    uint8_t *bl3 = b3 + i * (uint32_t)128U;
-    Hacl_Impl_SHA2_Types_uint8_4p
+    uint8_t *bl0 = b0 + i * 128U;
+    uint8_t *bl1 = b1 + i * 128U;
+    uint8_t *bl2 = b2 + i * 128U;
+    uint8_t *bl3 = b3 + i * 128U;
+    Hacl_Hash_SHA2_uint8_4p
     mb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
     sha512_update4(mb, st);
   }
@@ -2259,69 +2259,69 @@ static inline void
 sha512_update_last4(
   FStar_UInt128_uint128 totlen,
   uint32_t len,
-  Hacl_Impl_SHA2_Types_uint8_4p b,
+  Hacl_Hash_SHA2_uint8_4p b,
   Lib_IntVector_Intrinsics_vec256 *hash
 )
 {
   uint32_t blocks;
-  if (len + (uint32_t)16U + (uint32_t)1U <= (uint32_t)128U)
+  if (len + 16U + 1U <= 128U)
   {
-    blocks = (uint32_t)1U;
+    blocks = 1U;
   }
   else
   {
-    blocks = (uint32_t)2U;
+    blocks = 2U;
   }
-  uint32_t fin = blocks * (uint32_t)128U;
+  uint32_t fin = blocks * 128U;
   uint8_t last[1024U] = { 0U };
   uint8_t totlen_buf[16U] = { 0U };
-  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, (uint32_t)3U);
+  FStar_UInt128_uint128 total_len_bits = FStar_UInt128_shift_left(totlen, 3U);
   store128_be(totlen_buf, total_len_bits);
   uint8_t *b3 = b.snd.snd.snd;
   uint8_t *b2 = b.snd.snd.fst;
   uint8_t *b1 = b.snd.fst;
   uint8_t *b0 = b.fst;
   uint8_t *last00 = last;
-  uint8_t *last10 = last + (uint32_t)256U;
-  uint8_t *last2 = last + (uint32_t)512U;
-  uint8_t *last3 = last + (uint32_t)768U;
+  uint8_t *last10 = last + 256U;
+  uint8_t *last2 = last + 512U;
+  uint8_t *last3 = last + 768U;
   memcpy(last00, b0, len * sizeof (uint8_t));
-  last00[len] = (uint8_t)0x80U;
-  memcpy(last00 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last00[len] = 0x80U;
+  memcpy(last00 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last010 = last00;
-  uint8_t *last110 = last00 + (uint32_t)128U;
+  uint8_t *last110 = last00 + 128U;
   uint8_t *l00 = last010;
   uint8_t *l01 = last110;
   memcpy(last10, b1, len * sizeof (uint8_t));
-  last10[len] = (uint8_t)0x80U;
-  memcpy(last10 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last10[len] = 0x80U;
+  memcpy(last10 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last011 = last10;
-  uint8_t *last111 = last10 + (uint32_t)128U;
+  uint8_t *last111 = last10 + 128U;
   uint8_t *l10 = last011;
   uint8_t *l11 = last111;
   memcpy(last2, b2, len * sizeof (uint8_t));
-  last2[len] = (uint8_t)0x80U;
-  memcpy(last2 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last2[len] = 0x80U;
+  memcpy(last2 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last012 = last2;
-  uint8_t *last112 = last2 + (uint32_t)128U;
+  uint8_t *last112 = last2 + 128U;
   uint8_t *l20 = last012;
   uint8_t *l21 = last112;
   memcpy(last3, b3, len * sizeof (uint8_t));
-  last3[len] = (uint8_t)0x80U;
-  memcpy(last3 + fin - (uint32_t)16U, totlen_buf, (uint32_t)16U * sizeof (uint8_t));
+  last3[len] = 0x80U;
+  memcpy(last3 + fin - 16U, totlen_buf, 16U * sizeof (uint8_t));
   uint8_t *last01 = last3;
-  uint8_t *last11 = last3 + (uint32_t)128U;
+  uint8_t *last11 = last3 + 128U;
   uint8_t *l30 = last01;
   uint8_t *l31 = last11;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb0 = { .fst = l00, .snd = { .fst = l10, .snd = { .fst = l20, .snd = l30 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   mb1 = { .fst = l01, .snd = { .fst = l11, .snd = { .fst = l21, .snd = l31 } } };
-  Hacl_Impl_SHA2_Types_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
-  Hacl_Impl_SHA2_Types_uint8_4p last0 = scrut.fst;
-  Hacl_Impl_SHA2_Types_uint8_4p last1 = scrut.snd;
+  Hacl_Hash_SHA2_uint8_2x4p scrut = { .fst = mb0, .snd = mb1 };
+  Hacl_Hash_SHA2_uint8_4p last0 = scrut.fst;
+  Hacl_Hash_SHA2_uint8_4p last1 = scrut.snd;
   sha512_update4(last0, hash);
-  if (blocks > (uint32_t)1U)
+  if (blocks > 1U)
   {
     sha512_update4(last1, hash);
     return;
@@ -2329,7 +2329,7 @@ sha512_update_last4(
 }
 
 static inline void
-sha512_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_4p h)
+sha512_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Hash_SHA2_uint8_4p h)
 {
   uint8_t hbuf[256U] = { 0U };
   Lib_IntVector_Intrinsics_vec256 v00 = st[0U];
@@ -2389,18 +2389,18 @@ sha512_finish4(Lib_IntVector_Intrinsics_vec256 *st, Hacl_Impl_SHA2_Types_uint8_4
   st[6U] = st3_;
   st[7U] = st7_;
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * (uint32_t)32U, st[i]););
+    0U,
+    8U,
+    1U,
+    Lib_IntVector_Intrinsics_vec256_store64_be(hbuf + i * 32U, st[i]););
   uint8_t *b3 = h.snd.snd.snd;
   uint8_t *b2 = h.snd.snd.fst;
   uint8_t *b1 = h.snd.fst;
   uint8_t *b0 = h.fst;
-  memcpy(b0, hbuf, (uint32_t)64U * sizeof (uint8_t));
-  memcpy(b1, hbuf + (uint32_t)64U, (uint32_t)64U * sizeof (uint8_t));
-  memcpy(b2, hbuf + (uint32_t)128U, (uint32_t)64U * sizeof (uint8_t));
-  memcpy(b3, hbuf + (uint32_t)192U, (uint32_t)64U * sizeof (uint8_t));
+  memcpy(b0, hbuf, 64U * sizeof (uint8_t));
+  memcpy(b1, hbuf + 64U, 64U * sizeof (uint8_t));
+  memcpy(b2, hbuf + 128U, 64U * sizeof (uint8_t));
+  memcpy(b3, hbuf + 192U, 64U * sizeof (uint8_t));
 }
 
 void
@@ -2416,16 +2416,16 @@ Hacl_SHA2_Vec256_sha512_4(
   uint8_t *input3
 )
 {
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   ib = { .fst = input0, .snd = { .fst = input1, .snd = { .fst = input2, .snd = input3 } } };
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   rb = { .fst = dst0, .snd = { .fst = dst1, .snd = { .fst = dst2, .snd = dst3 } } };
   KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 st[8U] KRML_POST_ALIGN(32) = { 0U };
   sha512_init4(st);
-  uint32_t rem = input_len % (uint32_t)128U;
+  uint32_t rem = input_len % 128U;
   FStar_UInt128_uint128 len_ = FStar_UInt128_uint64_to_uint128((uint64_t)input_len);
   sha512_update_nblocks4(input_len, ib, st);
-  uint32_t rem1 = input_len % (uint32_t)128U;
+  uint32_t rem1 = input_len % 128U;
   uint8_t *b3 = ib.snd.snd.snd;
   uint8_t *b2 = ib.snd.snd.fst;
   uint8_t *b1 = ib.snd.fst;
@@ -2434,7 +2434,7 @@ Hacl_SHA2_Vec256_sha512_4(
   uint8_t *bl1 = b1 + input_len - rem1;
   uint8_t *bl2 = b2 + input_len - rem1;
   uint8_t *bl3 = b3 + input_len - rem1;
-  Hacl_Impl_SHA2_Types_uint8_4p
+  Hacl_Hash_SHA2_uint8_4p
   lb = { .fst = bl0, .snd = { .fst = bl1, .snd = { .fst = bl2, .snd = bl3 } } };
   sha512_update_last4(len_, rem, lb, st);
   sha512_finish4(st, rb);
diff --git a/src/msvc/Hacl_Salsa20.c b/src/msvc/Hacl_Salsa20.c
index 2758f8a4..151df07d 100644
--- a/src/msvc/Hacl_Salsa20.c
+++ b/src/msvc/Hacl_Salsa20.c
@@ -30,35 +30,35 @@ static inline void quarter_round(uint32_t *st, uint32_t a, uint32_t b, uint32_t
   uint32_t sta = st[b];
   uint32_t stb0 = st[a];
   uint32_t std0 = st[d];
-  uint32_t sta1 = sta ^ ((stb0 + std0) << (uint32_t)7U | (stb0 + std0) >> (uint32_t)25U);
+  uint32_t sta1 = sta ^ ((stb0 + std0) << 7U | (stb0 + std0) >> 25U);
   st[b] = sta1;
   uint32_t sta0 = st[c];
   uint32_t stb1 = st[b];
   uint32_t std1 = st[a];
-  uint32_t sta10 = sta0 ^ ((stb1 + std1) << (uint32_t)9U | (stb1 + std1) >> (uint32_t)23U);
+  uint32_t sta10 = sta0 ^ ((stb1 + std1) << 9U | (stb1 + std1) >> 23U);
   st[c] = sta10;
   uint32_t sta2 = st[d];
   uint32_t stb2 = st[c];
   uint32_t std2 = st[b];
-  uint32_t sta11 = sta2 ^ ((stb2 + std2) << (uint32_t)13U | (stb2 + std2) >> (uint32_t)19U);
+  uint32_t sta11 = sta2 ^ ((stb2 + std2) << 13U | (stb2 + std2) >> 19U);
   st[d] = sta11;
   uint32_t sta3 = st[a];
   uint32_t stb = st[d];
   uint32_t std = st[c];
-  uint32_t sta12 = sta3 ^ ((stb + std) << (uint32_t)18U | (stb + std) >> (uint32_t)14U);
+  uint32_t sta12 = sta3 ^ ((stb + std) << 18U | (stb + std) >> 14U);
   st[a] = sta12;
 }
 
 static inline void double_round(uint32_t *st)
 {
-  quarter_round(st, (uint32_t)0U, (uint32_t)4U, (uint32_t)8U, (uint32_t)12U);
-  quarter_round(st, (uint32_t)5U, (uint32_t)9U, (uint32_t)13U, (uint32_t)1U);
-  quarter_round(st, (uint32_t)10U, (uint32_t)14U, (uint32_t)2U, (uint32_t)6U);
-  quarter_round(st, (uint32_t)15U, (uint32_t)3U, (uint32_t)7U, (uint32_t)11U);
-  quarter_round(st, (uint32_t)0U, (uint32_t)1U, (uint32_t)2U, (uint32_t)3U);
-  quarter_round(st, (uint32_t)5U, (uint32_t)6U, (uint32_t)7U, (uint32_t)4U);
-  quarter_round(st, (uint32_t)10U, (uint32_t)11U, (uint32_t)8U, (uint32_t)9U);
-  quarter_round(st, (uint32_t)15U, (uint32_t)12U, (uint32_t)13U, (uint32_t)14U);
+  quarter_round(st, 0U, 4U, 8U, 12U);
+  quarter_round(st, 5U, 9U, 13U, 1U);
+  quarter_round(st, 10U, 14U, 2U, 6U);
+  quarter_round(st, 15U, 3U, 7U, 11U);
+  quarter_round(st, 0U, 1U, 2U, 3U);
+  quarter_round(st, 5U, 6U, 7U, 4U);
+  quarter_round(st, 10U, 11U, 8U, 9U);
+  quarter_round(st, 15U, 12U, 13U, 14U);
 }
 
 static inline void rounds(uint32_t *st)
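
Every rotation in quarter_round above is spelled as a shift pair whose counts sum to 32, for example (x << 7U | x >> 25U), and likewise 9/23, 13/19 and 18/14; each is simply a 32-bit left rotation of the sum of two state words. A self-contained check of that identity, independent of the HACL* code (names below are chosen for illustration only):

    #include <assert.h>
    #include <stdint.h>

    /* 32-bit left rotation written the same way as in quarter_round:
       (x << n) | (x >> (32 - n)), valid for 0 < n < 32. */
    static uint32_t rotl32(uint32_t x, uint32_t n)
    {
      return (x << n) | (x >> (32U - n));
    }

    int main(void)
    {
      uint32_t x = 0x80000001U;
      assert(rotl32(x, 7U)  == ((x << 7U)  | (x >> 25U)));
      assert(rotl32(x, 18U) == ((x << 18U) | (x >> 14U)));
      return 0;
    }
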
@@ -77,14 +77,14 @@ static inline void rounds(uint32_t *st)
 
 static inline void salsa20_core(uint32_t *k, uint32_t *ctx, uint32_t ctr)
 {
-  memcpy(k, ctx, (uint32_t)16U * sizeof (uint32_t));
+  memcpy(k, ctx, 16U * sizeof (uint32_t));
   uint32_t ctr_u32 = ctr;
   k[8U] = k[8U] + ctr_u32;
   rounds(k);
   KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
+    0U,
+    16U,
+    1U,
     uint32_t *os = k;
     uint32_t x = k[i] + ctx[i];
     os[i] = x;);
@@ -98,42 +98,38 @@ static inline void salsa20_key_block0(uint8_t *out, uint8_t *key, uint8_t *n)
   uint32_t k32[8U] = { 0U };
   uint32_t n32[2U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
-  ctx[0U] = (uint32_t)0x61707865U;
+  ctx[0U] = 0x61707865U;
   uint32_t *k0 = k32;
-  uint32_t *k1 = k32 + (uint32_t)4U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t));
-  ctx[8U] = (uint32_t)0U;
-  ctx[9U] = (uint32_t)0U;
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k1, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
-  salsa20_core(k, ctx, (uint32_t)0U);
-  KRML_MAYBE_FOR16(i,
-    (uint32_t)0U,
-    (uint32_t)16U,
-    (uint32_t)1U,
-    store32_le(out + i * (uint32_t)4U, k[i]););
+  uint32_t *k1 = k32 + 4U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 2U * sizeof (uint32_t));
+  ctx[8U] = 0U;
+  ctx[9U] = 0U;
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k1, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
+  salsa20_core(k, ctx, 0U);
+  KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(out + i * 4U, k[i]););
 }
 
 static inline void
@@ -150,101 +146,93 @@ salsa20_encrypt(
   uint32_t k32[8U] = { 0U };
   uint32_t n32[2U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
-  ctx[0U] = (uint32_t)0x61707865U;
+  ctx[0U] = 0x61707865U;
   uint32_t *k0 = k32;
-  uint32_t *k10 = k32 + (uint32_t)4U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t));
+  uint32_t *k10 = k32 + 4U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 2U * sizeof (uint32_t));
   ctx[8U] = ctr;
-  ctx[9U] = (uint32_t)0U;
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
+  ctx[9U] = 0U;
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k10, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
   uint32_t k[16U] = { 0U };
-  KRML_HOST_IGNORE(k);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  KRML_MAYBE_UNUSED_VAR(k);
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = text + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = text + i0 * 64U;
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, i0);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = uu____1 + i * (uint32_t)4U;
+      uint8_t *bj = uu____1 + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(uu____0 + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(uu____0 + i * 4U, bl[i]););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, text + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, text + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, nb);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = plain + i * (uint32_t)4U;
+      uint8_t *bj = plain + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(plain + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(plain + i * 4U, bl[i]););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -263,101 +251,93 @@ salsa20_decrypt(
   uint32_t k32[8U] = { 0U };
   uint32_t n32[2U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR2(i,
-    (uint32_t)0U,
-    (uint32_t)2U,
-    (uint32_t)1U,
+    0U,
+    2U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
-  ctx[0U] = (uint32_t)0x61707865U;
+  ctx[0U] = 0x61707865U;
   uint32_t *k0 = k32;
-  uint32_t *k10 = k32 + (uint32_t)4U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)2U * sizeof (uint32_t));
+  uint32_t *k10 = k32 + 4U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 2U * sizeof (uint32_t));
   ctx[8U] = ctr;
-  ctx[9U] = (uint32_t)0U;
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
+  ctx[9U] = 0U;
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k10, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
   uint32_t k[16U] = { 0U };
-  KRML_HOST_IGNORE(k);
-  uint32_t rem = len % (uint32_t)64U;
-  uint32_t nb = len / (uint32_t)64U;
-  uint32_t rem1 = len % (uint32_t)64U;
-  for (uint32_t i0 = (uint32_t)0U; i0 < nb; i0++)
+  KRML_MAYBE_UNUSED_VAR(k);
+  uint32_t rem = len % 64U;
+  uint32_t nb = len / 64U;
+  uint32_t rem1 = len % 64U;
+  for (uint32_t i0 = 0U; i0 < nb; i0++)
   {
-    uint8_t *uu____0 = out + i0 * (uint32_t)64U;
-    uint8_t *uu____1 = cipher + i0 * (uint32_t)64U;
+    uint8_t *uu____0 = out + i0 * 64U;
+    uint8_t *uu____1 = cipher + i0 * 64U;
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, i0);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = uu____1 + i * (uint32_t)4U;
+      uint8_t *bj = uu____1 + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(uu____0 + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(uu____0 + i * 4U, bl[i]););
   }
-  if (rem1 > (uint32_t)0U)
+  if (rem1 > 0U)
   {
-    uint8_t *uu____2 = out + nb * (uint32_t)64U;
+    uint8_t *uu____2 = out + nb * 64U;
     uint8_t plain[64U] = { 0U };
-    memcpy(plain, cipher + nb * (uint32_t)64U, rem * sizeof (uint8_t));
+    memcpy(plain, cipher + nb * 64U, rem * sizeof (uint8_t));
     uint32_t k1[16U] = { 0U };
     salsa20_core(k1, ctx, nb);
     uint32_t bl[16U] = { 0U };
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
-      uint8_t *bj = plain + i * (uint32_t)4U;
+      uint8_t *bj = plain + i * 4U;
       uint32_t u = load32_le(bj);
       uint32_t r = u;
       uint32_t x = r;
       os[i] = x;);
     KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
+      0U,
+      16U,
+      1U,
       uint32_t *os = bl;
       uint32_t x = bl[i] ^ k1[i];
       os[i] = x;);
-    KRML_MAYBE_FOR16(i,
-      (uint32_t)0U,
-      (uint32_t)16U,
-      (uint32_t)1U,
-      store32_le(plain + i * (uint32_t)4U, bl[i]););
+    KRML_MAYBE_FOR16(i, 0U, 16U, 1U, store32_le(plain + i * 4U, bl[i]););
     memcpy(uu____2, plain, rem * sizeof (uint8_t));
   }
 }
@@ -368,34 +348,34 @@ static inline void hsalsa20(uint8_t *out, uint8_t *key, uint8_t *n)
   uint32_t k32[8U] = { 0U };
   uint32_t n32[4U] = { 0U };
   KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
+    0U,
+    8U,
+    1U,
     uint32_t *os = k32;
-    uint8_t *bj = key + i * (uint32_t)4U;
+    uint8_t *bj = key + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   KRML_MAYBE_FOR4(i,
-    (uint32_t)0U,
-    (uint32_t)4U,
-    (uint32_t)1U,
+    0U,
+    4U,
+    1U,
     uint32_t *os = n32;
-    uint8_t *bj = n + i * (uint32_t)4U;
+    uint8_t *bj = n + i * 4U;
     uint32_t u = load32_le(bj);
     uint32_t r = u;
     uint32_t x = r;
     os[i] = x;);
   uint32_t *k0 = k32;
-  uint32_t *k1 = k32 + (uint32_t)4U;
-  ctx[0U] = (uint32_t)0x61707865U;
-  memcpy(ctx + (uint32_t)1U, k0, (uint32_t)4U * sizeof (uint32_t));
-  ctx[5U] = (uint32_t)0x3320646eU;
-  memcpy(ctx + (uint32_t)6U, n32, (uint32_t)4U * sizeof (uint32_t));
-  ctx[10U] = (uint32_t)0x79622d32U;
-  memcpy(ctx + (uint32_t)11U, k1, (uint32_t)4U * sizeof (uint32_t));
-  ctx[15U] = (uint32_t)0x6b206574U;
+  uint32_t *k1 = k32 + 4U;
+  ctx[0U] = 0x61707865U;
+  memcpy(ctx + 1U, k0, 4U * sizeof (uint32_t));
+  ctx[5U] = 0x3320646eU;
+  memcpy(ctx + 6U, n32, 4U * sizeof (uint32_t));
+  ctx[10U] = 0x79622d32U;
+  memcpy(ctx + 11U, k1, 4U * sizeof (uint32_t));
+  ctx[15U] = 0x6b206574U;
   rounds(ctx);
   uint32_t r0 = ctx[0U];
   uint32_t r1 = ctx[5U];
@@ -406,11 +386,7 @@ static inline void hsalsa20(uint8_t *out, uint8_t *key, uint8_t *n)
   uint32_t r6 = ctx[8U];
   uint32_t r7 = ctx[9U];
   uint32_t res[8U] = { r0, r1, r2, r3, r4, r5, r6, r7 };
-  KRML_MAYBE_FOR8(i,
-    (uint32_t)0U,
-    (uint32_t)8U,
-    (uint32_t)1U,
-    store32_le(out + i * (uint32_t)4U, res[i]););
+  KRML_MAYBE_FOR8(i, 0U, 8U, 1U, store32_le(out + i * 4U, res[i]););
 }
 
 void
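
Throughout salsa20_key_block0, salsa20_encrypt, salsa20_decrypt and hsalsa20 above, ctx[0], ctx[5], ctx[10] and ctx[15] are set to 0x61707865U, 0x3320646eU, 0x79622d32U and 0x6b206574U. These are the standard Salsa20 diagonal constants: read as little-endian bytes they spell the ASCII string "expand 32-byte k". A short standalone decoder, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
      /* The four diagonal words used to initialize the Salsa20 state above. */
      const uint32_t sigma[4] = { 0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U };
      for (int i = 0; i < 4; i++)
      {
        for (int b = 0; b < 4; b++)
        {
          putchar((int)((sigma[i] >> (8 * b)) & 0xffU));  /* little-endian byte order */
        }
      }
      putchar('\n');  /* prints: expand 32-byte k */
      return 0;
    }
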
diff --git a/src/msvc/Hacl_Streaming_Blake2.c b/src/msvc/Hacl_Streaming_Blake2.c
deleted file mode 100644
index 948d56c2..00000000
--- a/src/msvc/Hacl_Streaming_Blake2.c
+++ /dev/null
@@ -1,655 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Streaming_Blake2.h"
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2_blake2s_32_state *Hacl_Streaming_Blake2_blake2s_32_no_key_create_in(void)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  uint32_t *wv = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
-  uint32_t *b = (uint32_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint32_t));
-  Hacl_Streaming_Blake2_blake2s_32_block_state block_state = { .fst = wv, .snd = b };
-  Hacl_Streaming_Blake2_blake2s_32_state
-  s1 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  Hacl_Streaming_Blake2_blake2s_32_state
-  *p =
-    (Hacl_Streaming_Blake2_blake2s_32_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Blake2_blake2s_32_state
-      ));
-  p[0U] = s1;
-  Hacl_Blake2s_32_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
-  return p;
-}
-
-/**
-  (Re-)initialization function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2s_32_no_key_init(Hacl_Streaming_Blake2_blake2s_32_state *s1)
-{
-  Hacl_Streaming_Blake2_blake2s_32_state scrut = *s1;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2_blake2s_32_block_state block_state = scrut.block_state;
-  Hacl_Blake2s_32_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
-  Hacl_Streaming_Blake2_blake2s_32_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s1[0U] = tmp;
-}
-
-/**
-  Update function when there is no key; 0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2_blake2s_32_no_key_update(
-  Hacl_Streaming_Blake2_blake2s_32_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Blake2_blake2s_32_state s1 = *p;
-  uint64_t total_len = s1.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)64U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
-  }
-  if (len <= (uint32_t)64U - sz)
-  {
-    Hacl_Streaming_Blake2_blake2s_32_state s2 = *p;
-    Hacl_Streaming_Blake2_blake2s_32_block_state block_state1 = s2.block_state;
-    uint8_t *buf = s2.buf;
-    uint64_t total_len1 = s2.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2s_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Blake2_blake2s_32_state s2 = *p;
-    Hacl_Streaming_Blake2_blake2s_32_block_state block_state1 = s2.block_state;
-    uint8_t *buf = s2.buf;
-    uint64_t total_len1 = s2.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      uint32_t *wv = block_state1.fst;
-      uint32_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)64U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    uint32_t *wv = block_state1.fst;
-    uint32_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
-    Hacl_Blake2s_32_blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2s_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)64U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Blake2_blake2s_32_state s2 = *p;
-    Hacl_Streaming_Blake2_blake2s_32_block_state block_state10 = s2.block_state;
-    uint8_t *buf0 = s2.buf;
-    uint64_t total_len10 = s2.total_len;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)64U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2s_32_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2
-        }
-      );
-    Hacl_Streaming_Blake2_blake2s_32_state s20 = *p;
-    Hacl_Streaming_Blake2_blake2s_32_block_state block_state1 = s20.block_state;
-    uint8_t *buf = s20.buf;
-    uint64_t total_len1 = s20.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      uint32_t *wv = block_state1.fst;
-      uint32_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_32_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)64U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    uint32_t *wv = block_state1.fst;
-    uint32_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
-    Hacl_Blake2s_32_blake2s_update_multi(data1_len, wv, hash, total_len1, data11, nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2s_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2_blake2s_32_no_key_finish(
-  Hacl_Streaming_Blake2_blake2s_32_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Blake2_blake2s_32_state scrut = *p;
-  Hacl_Streaming_Blake2_blake2s_32_block_state block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)64U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
-  }
-  uint8_t *buf_1 = buf_;
-  uint32_t wv0[16U] = { 0U };
-  uint32_t b[16U] = { 0U };
-  Hacl_Streaming_Blake2_blake2s_32_block_state tmp_block_state = { .fst = wv0, .snd = b };
-  uint32_t *src_b = block_state.snd;
-  uint32_t *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)16U * sizeof (uint32_t));
-  uint64_t prev_len = total_len - (uint64_t)r;
-  uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)64U;
-  }
-  else
-  {
-    ite = r % (uint32_t)64U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite;
-  uint8_t *buf_multi = buf_1;
-  uint32_t *wv1 = tmp_block_state.fst;
-  uint32_t *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2s_32_blake2s_update_multi((uint32_t)0U, wv1, hash0, prev_len, buf_multi, nb);
-  uint64_t prev_len_last = total_len - (uint64_t)r;
-  uint32_t *wv = tmp_block_state.fst;
-  uint32_t *hash = tmp_block_state.snd;
-  Hacl_Blake2s_32_blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last);
-  Hacl_Blake2s_32_blake2s_finish((uint32_t)32U, dst, tmp_block_state.snd);
-}
-
-/**
-  Free state function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2s_32_no_key_free(Hacl_Streaming_Blake2_blake2s_32_state *s1)
-{
-  Hacl_Streaming_Blake2_blake2s_32_state scrut = *s1;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2_blake2s_32_block_state block_state = scrut.block_state;
-  uint32_t *wv = block_state.fst;
-  uint32_t *b = block_state.snd;
-  KRML_HOST_FREE(wv);
-  KRML_HOST_FREE(b);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s1);
-}
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2_blake2b_32_state *Hacl_Streaming_Blake2_blake2b_32_no_key_create_in(void)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  uint64_t *wv = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
-  uint64_t *b = (uint64_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint64_t));
-  Hacl_Streaming_Blake2_blake2b_32_block_state block_state = { .fst = wv, .snd = b };
-  Hacl_Streaming_Blake2_blake2b_32_state
-  s1 = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  Hacl_Streaming_Blake2_blake2b_32_state
-  *p =
-    (Hacl_Streaming_Blake2_blake2b_32_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Blake2_blake2b_32_state
-      ));
-  p[0U] = s1;
-  Hacl_Blake2b_32_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
-  return p;
-}
-
-/**
-  (Re)-initialization function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2b_32_no_key_init(Hacl_Streaming_Blake2_blake2b_32_state *s1)
-{
-  Hacl_Streaming_Blake2_blake2b_32_state scrut = *s1;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2_blake2b_32_block_state block_state = scrut.block_state;
-  Hacl_Blake2b_32_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
-  Hacl_Streaming_Blake2_blake2b_32_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s1[0U] = tmp;
-}
-
-/**
-  Update function when there is no key; 0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2_blake2b_32_no_key_update(
-  Hacl_Streaming_Blake2_blake2b_32_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Blake2_blake2b_32_state s1 = *p;
-  uint64_t total_len = s1.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)128U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
-  }
-  if (len <= (uint32_t)128U - sz)
-  {
-    Hacl_Streaming_Blake2_blake2b_32_state s2 = *p;
-    Hacl_Streaming_Blake2_blake2b_32_block_state block_state1 = s2.block_state;
-    uint8_t *buf = s2.buf;
-    uint64_t total_len1 = s2.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)128U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2b_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Blake2_blake2b_32_state s2 = *p;
-    Hacl_Streaming_Blake2_blake2b_32_block_state block_state1 = s2.block_state;
-    uint8_t *buf = s2.buf;
-    uint64_t total_len1 = s2.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)128U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      uint64_t *wv = block_state1.fst;
-      uint64_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
-        wv,
-        hash,
-        FStar_UInt128_uint64_to_uint128(prevlen),
-        buf,
-        nb);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)128U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    uint64_t *wv = block_state1.fst;
-    uint64_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
-    Hacl_Blake2b_32_blake2b_update_multi(data1_len,
-      wv,
-      hash,
-      FStar_UInt128_uint64_to_uint128(total_len1),
-      data1,
-      nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2b_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)128U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Blake2_blake2b_32_state s2 = *p;
-    Hacl_Streaming_Blake2_blake2b_32_block_state block_state10 = s2.block_state;
-    uint8_t *buf0 = s2.buf;
-    uint64_t total_len10 = s2.total_len;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)128U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2b_32_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2
-        }
-      );
-    Hacl_Streaming_Blake2_blake2b_32_state s20 = *p;
-    Hacl_Streaming_Blake2_blake2b_32_block_state block_state1 = s20.block_state;
-    uint8_t *buf = s20.buf;
-    uint64_t total_len1 = s20.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)128U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      uint64_t *wv = block_state1.fst;
-      uint64_t *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_32_blake2b_update_multi((uint32_t)128U,
-        wv,
-        hash,
-        FStar_UInt128_uint64_to_uint128(prevlen),
-        buf,
-        nb);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)128U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)128U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    uint64_t *wv = block_state1.fst;
-    uint64_t *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
-    Hacl_Blake2b_32_blake2b_update_multi(data1_len,
-      wv,
-      hash,
-      FStar_UInt128_uint64_to_uint128(total_len1),
-      data11,
-      nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2_blake2b_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2_blake2b_32_no_key_finish(
-  Hacl_Streaming_Blake2_blake2b_32_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Blake2_blake2b_32_state scrut = *p;
-  Hacl_Streaming_Blake2_blake2b_32_block_state block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)128U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
-  }
-  uint8_t *buf_1 = buf_;
-  uint64_t wv0[16U] = { 0U };
-  uint64_t b[16U] = { 0U };
-  Hacl_Streaming_Blake2_blake2b_32_block_state tmp_block_state = { .fst = wv0, .snd = b };
-  uint64_t *src_b = block_state.snd;
-  uint64_t *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)16U * sizeof (uint64_t));
-  uint64_t prev_len = total_len - (uint64_t)r;
-  uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)128U;
-  }
-  else
-  {
-    ite = r % (uint32_t)128U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite;
-  uint8_t *buf_multi = buf_1;
-  uint64_t *wv1 = tmp_block_state.fst;
-  uint64_t *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2b_32_blake2b_update_multi((uint32_t)0U,
-    wv1,
-    hash0,
-    FStar_UInt128_uint64_to_uint128(prev_len),
-    buf_multi,
-    nb);
-  uint64_t prev_len_last = total_len - (uint64_t)r;
-  uint64_t *wv = tmp_block_state.fst;
-  uint64_t *hash = tmp_block_state.snd;
-  Hacl_Blake2b_32_blake2b_update_last(r,
-    wv,
-    hash,
-    FStar_UInt128_uint64_to_uint128(prev_len_last),
-    r,
-    buf_last);
-  Hacl_Blake2b_32_blake2b_finish((uint32_t)64U, dst, tmp_block_state.snd);
-}
-
-/**
-  Free state function when there is no key
-*/
-void Hacl_Streaming_Blake2_blake2b_32_no_key_free(Hacl_Streaming_Blake2_blake2b_32_state *s1)
-{
-  Hacl_Streaming_Blake2_blake2b_32_state scrut = *s1;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2_blake2b_32_block_state block_state = scrut.block_state;
-  uint64_t *wv = block_state.fst;
-  uint64_t *b = block_state.snd;
-  KRML_HOST_FREE(wv);
-  KRML_HOST_FREE(b);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s1);
-}
-
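
The deletion above removes the keyless streaming BLAKE2s-32/BLAKE2b-32 wrappers, so existing callers will need to be ported to whatever replaces them. For reference, the calling pattern this file supported, reconstructed from the signatures visible in the deleted code and not a recommendation to keep using it, looked roughly like:

    #include <stdint.h>
    #include "Hacl_Streaming_Blake2.h"   /* header of the file removed here */

    /* Keyless BLAKE2s over a single message via the (now deleted) streaming API:
       allocate, feed the data, extract the 32-byte digest, free. */
    static void blake2s_oneshot(uint8_t digest[32], uint8_t *msg, uint32_t len)
    {
      Hacl_Streaming_Blake2_blake2s_32_state
      *st = Hacl_Streaming_Blake2_blake2s_32_no_key_create_in();
      Hacl_Streaming_Types_error_code
      err = Hacl_Streaming_Blake2_blake2s_32_no_key_update(st, msg, len);
      if (err == Hacl_Streaming_Types_Success)
      {
        Hacl_Streaming_Blake2_blake2s_32_no_key_finish(st, digest);
      }
      Hacl_Streaming_Blake2_blake2s_32_no_key_free(st);
    }
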
diff --git a/src/msvc/Hacl_Streaming_Blake2b_256.c b/src/msvc/Hacl_Streaming_Blake2b_256.c
deleted file mode 100644
index bdb5433f..00000000
--- a/src/msvc/Hacl_Streaming_Blake2b_256.c
+++ /dev/null
@@ -1,371 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Streaming_Blake2b_256.h"
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2b_256_blake2b_256_state
-*Hacl_Streaming_Blake2b_256_blake2b_256_no_key_create_in(void)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)128U, sizeof (uint8_t));
-  Lib_IntVector_Intrinsics_vec256
-  *wv =
-    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U);
-  memset(wv, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Lib_IntVector_Intrinsics_vec256
-  *b =
-    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)4U);
-  memset(b, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = { .fst = wv, .snd = b };
-  Hacl_Streaming_Blake2b_256_blake2b_256_state
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  Hacl_Streaming_Blake2b_256_blake2b_256_state
-  *p =
-    (Hacl_Streaming_Blake2b_256_blake2b_256_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Blake2b_256_blake2b_256_state
-      ));
-  p[0U] = s;
-  Hacl_Blake2b_256_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
-  return p;
-}
-
-/**
-  (Re-)initialization function when there is no key
-*/
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_init(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *s
-)
-{
-  Hacl_Streaming_Blake2b_256_blake2b_256_state scrut = *s;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = scrut.block_state;
-  Hacl_Blake2b_256_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U);
-  Hacl_Streaming_Blake2b_256_blake2b_256_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
-}
-
-/**
-  Update function when there is no key; 0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Blake2b_256_blake2b_256_state s = *p;
-  uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)128U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
-  }
-  if (len <= (uint32_t)128U - sz)
-  {
-    Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p;
-    Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)128U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2b_256_blake2b_256_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p;
-    Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)128U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
-      Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
-        wv,
-        hash,
-        FStar_UInt128_uint64_to_uint128(prevlen),
-        buf,
-        nb);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)128U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)128U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)128U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
-    Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
-    Hacl_Blake2b_256_blake2b_update_multi(data1_len,
-      wv,
-      hash,
-      FStar_UInt128_uint64_to_uint128(total_len1),
-      data1,
-      nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2b_256_blake2b_256_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)128U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Blake2b_256_blake2b_256_state s1 = *p;
-    Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state10 = s1.block_state;
-    uint8_t *buf0 = s1.buf;
-    uint64_t total_len10 = s1.total_len;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)128U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)128U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2b_256_blake2b_256_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2
-        }
-      );
-    Hacl_Streaming_Blake2b_256_blake2b_256_state s10 = *p;
-    Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state1 = s10.block_state;
-    uint8_t *buf = s10.buf;
-    uint64_t total_len1 = s10.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)128U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)128U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
-      Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2b_256_blake2b_update_multi((uint32_t)128U,
-        wv,
-        hash,
-        FStar_UInt128_uint64_to_uint128(prevlen),
-        buf,
-        nb);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)128U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)128U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)128U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)128U;
-    uint32_t data1_len = n_blocks * (uint32_t)128U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Lib_IntVector_Intrinsics_vec256 *wv = block_state1.fst;
-    Lib_IntVector_Intrinsics_vec256 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)128U;
-    Hacl_Blake2b_256_blake2b_update_multi(data1_len,
-      wv,
-      hash,
-      FStar_UInt128_uint64_to_uint128(total_len1),
-      data11,
-      nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2b_256_blake2b_256_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Blake2b_256_blake2b_256_state scrut = *p;
-  Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)128U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)128U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)128U);
-  }
-  uint8_t *buf_1 = buf_;
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 wv0[4U] KRML_POST_ALIGN(32) = { 0U };
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 b[4U] KRML_POST_ALIGN(32) = { 0U };
-  Hacl_Streaming_Blake2b_256_blake2b_256_block_state tmp_block_state = { .fst = wv0, .snd = b };
-  Lib_IntVector_Intrinsics_vec256 *src_b = block_state.snd;
-  Lib_IntVector_Intrinsics_vec256 *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  uint64_t prev_len = total_len - (uint64_t)r;
-  uint32_t ite;
-  if (r % (uint32_t)128U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)128U;
-  }
-  else
-  {
-    ite = r % (uint32_t)128U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite;
-  uint8_t *buf_multi = buf_1;
-  Lib_IntVector_Intrinsics_vec256 *wv1 = tmp_block_state.fst;
-  Lib_IntVector_Intrinsics_vec256 *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2b_256_blake2b_update_multi((uint32_t)0U,
-    wv1,
-    hash0,
-    FStar_UInt128_uint64_to_uint128(prev_len),
-    buf_multi,
-    nb);
-  uint64_t prev_len_last = total_len - (uint64_t)r;
-  Lib_IntVector_Intrinsics_vec256 *wv = tmp_block_state.fst;
-  Lib_IntVector_Intrinsics_vec256 *hash = tmp_block_state.snd;
-  Hacl_Blake2b_256_blake2b_update_last(r,
-    wv,
-    hash,
-    FStar_UInt128_uint64_to_uint128(prev_len_last),
-    r,
-    buf_last);
-  Hacl_Blake2b_256_blake2b_finish((uint32_t)64U, dst, tmp_block_state.snd);
-}
-
-/**
-  Free state function when there is no key
-*/
-void
-Hacl_Streaming_Blake2b_256_blake2b_256_no_key_free(
-  Hacl_Streaming_Blake2b_256_blake2b_256_state *s
-)
-{
-  Hacl_Streaming_Blake2b_256_blake2b_256_state scrut = *s;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = scrut.block_state;
-  Lib_IntVector_Intrinsics_vec256 *wv = block_state.fst;
-  Lib_IntVector_Intrinsics_vec256 *b = block_state.snd;
-  KRML_ALIGNED_FREE(wv);
-  KRML_ALIGNED_FREE(b);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
-}
-
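For reference, a minimal caller sketch against the streaming Blake2b-256 interface whose finish and free functions are deleted above. Only the finish/free names appear verbatim in this hunk; the create_in/update names and the header name are assumed to mirror the no_key_* pattern of the Blake2s file removed below.

/* Usage sketch (not part of the patch). */
#include <stdint.h>
#include "Hacl_Streaming_Blake2b_256.h"   /* assumed header name */

static void blake2b_256_hash(uint8_t *msg, uint32_t msg_len, uint8_t out[64])
{
  /* Allocate a no-key streaming state (assumed name, by analogy with Blake2s). */
  Hacl_Streaming_Blake2b_256_blake2b_256_state
  *st = Hacl_Streaming_Blake2b_256_blake2b_256_no_key_create_in();
  /* Data can be fed in arbitrary chunks; update returns Success unless the
     running total would overflow (assumed name, by analogy with Blake2s). */
  Hacl_Streaming_Blake2b_256_blake2b_256_no_key_update(st, msg, msg_len);
  /* finish hashes a stack copy of the block state, so st stays usable. */
  Hacl_Streaming_Blake2b_256_blake2b_256_no_key_finish(st, out);
  Hacl_Streaming_Blake2b_256_blake2b_256_no_key_free(st);
}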
diff --git a/src/msvc/Hacl_Streaming_Blake2s_128.c b/src/msvc/Hacl_Streaming_Blake2s_128.c
deleted file mode 100644
index f97bf5d0..00000000
--- a/src/msvc/Hacl_Streaming_Blake2s_128.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Streaming_Blake2s_128.h"
-
-/**
-  State allocation function when there is no key
-*/
-Hacl_Streaming_Blake2s_128_blake2s_128_state
-*Hacl_Streaming_Blake2s_128_blake2s_128_no_key_create_in(void)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  Lib_IntVector_Intrinsics_vec128
-  *wv =
-    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U);
-  memset(wv, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  Lib_IntVector_Intrinsics_vec128
-  *b =
-    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)4U);
-  memset(b, 0U, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = { .fst = wv, .snd = b };
-  Hacl_Streaming_Blake2s_128_blake2s_128_state
-  s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  Hacl_Streaming_Blake2s_128_blake2s_128_state
-  *p =
-    (Hacl_Streaming_Blake2s_128_blake2s_128_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Blake2s_128_blake2s_128_state
-      ));
-  p[0U] = s;
-  Hacl_Blake2s_128_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
-  return p;
-}
-
-/**
-  (Re-)initialization function when there is no key
-*/
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_init(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *s
-)
-{
-  Hacl_Streaming_Blake2s_128_blake2s_128_state scrut = *s;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = scrut.block_state;
-  Hacl_Blake2s_128_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U);
-  Hacl_Streaming_Blake2s_128_blake2s_128_state
-  tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U };
-  s[0U] = tmp;
-}
-
-/**
-  Update function when there is no key; 0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Blake2s_128_blake2s_128_state s = *p;
-  uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)64U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
-  }
-  if (len <= (uint32_t)64U - sz)
-  {
-    Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p;
-    Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2s_128_blake2s_128_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p;
-    Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
-      Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)64U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
-    Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
-    Hacl_Blake2s_128_blake2s_update_multi(data1_len, wv, hash, total_len1, data1, nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2s_128_blake2s_128_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)64U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Blake2s_128_blake2s_128_state s1 = *p;
-    Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state10 = s1.block_state;
-    uint8_t *buf0 = s1.buf;
-    uint64_t total_len10 = s1.total_len;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)64U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2s_128_blake2s_128_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2
-        }
-      );
-    Hacl_Streaming_Blake2s_128_blake2s_128_state s10 = *p;
-    Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state1 = s10.block_state;
-    uint8_t *buf = s10.buf;
-    uint64_t total_len1 = s10.total_len;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      uint64_t prevlen = total_len1 - (uint64_t)sz1;
-      Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
-      Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-      uint32_t nb = (uint32_t)1U;
-      Hacl_Blake2s_128_blake2s_update_multi((uint32_t)64U, wv, hash, prevlen, buf, nb);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)64U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Lib_IntVector_Intrinsics_vec128 *wv = block_state1.fst;
-    Lib_IntVector_Intrinsics_vec128 *hash = block_state1.snd;
-    uint32_t nb = data1_len / (uint32_t)64U;
-    Hacl_Blake2s_128_blake2s_update_multi(data1_len, wv, hash, total_len1, data11, nb);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Blake2s_128_blake2s_128_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff)
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-/**
-  Finish function when there is no key
-*/
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_finish(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Blake2s_128_blake2s_128_state scrut = *p;
-  Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)64U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
-  }
-  uint8_t *buf_1 = buf_;
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 wv0[4U] KRML_POST_ALIGN(16) = { 0U };
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 b[4U] KRML_POST_ALIGN(16) = { 0U };
-  Hacl_Streaming_Blake2s_128_blake2s_128_block_state tmp_block_state = { .fst = wv0, .snd = b };
-  Lib_IntVector_Intrinsics_vec128 *src_b = block_state.snd;
-  Lib_IntVector_Intrinsics_vec128 *dst_b = tmp_block_state.snd;
-  memcpy(dst_b, src_b, (uint32_t)4U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  uint64_t prev_len = total_len - (uint64_t)r;
-  uint32_t ite;
-  if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)64U;
-  }
-  else
-  {
-    ite = r % (uint32_t)64U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite;
-  uint8_t *buf_multi = buf_1;
-  Lib_IntVector_Intrinsics_vec128 *wv1 = tmp_block_state.fst;
-  Lib_IntVector_Intrinsics_vec128 *hash0 = tmp_block_state.snd;
-  uint32_t nb = (uint32_t)0U;
-  Hacl_Blake2s_128_blake2s_update_multi((uint32_t)0U, wv1, hash0, prev_len, buf_multi, nb);
-  uint64_t prev_len_last = total_len - (uint64_t)r;
-  Lib_IntVector_Intrinsics_vec128 *wv = tmp_block_state.fst;
-  Lib_IntVector_Intrinsics_vec128 *hash = tmp_block_state.snd;
-  Hacl_Blake2s_128_blake2s_update_last(r, wv, hash, prev_len_last, r, buf_last);
-  Hacl_Blake2s_128_blake2s_finish((uint32_t)32U, dst, tmp_block_state.snd);
-}
-
-/**
-  Free state function when there is no key
-*/
-void
-Hacl_Streaming_Blake2s_128_blake2s_128_no_key_free(
-  Hacl_Streaming_Blake2s_128_blake2s_128_state *s
-)
-{
-  Hacl_Streaming_Blake2s_128_blake2s_128_state scrut = *s;
-  uint8_t *buf = scrut.buf;
-  Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = scrut.block_state;
-  Lib_IntVector_Intrinsics_vec128 *wv = block_state.fst;
-  Lib_IntVector_Intrinsics_vec128 *b = block_state.snd;
-  KRML_ALIGNED_FREE(wv);
-  KRML_ALIGNED_FREE(b);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
-}
-
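A minimal caller sketch for the Blake2s-128 streaming functions deleted above. Because no_key_finish hashes a stack copy of the block state rather than the live state, a caller can emit an intermediate digest and keep feeding data to the same state.

/* Usage sketch (not part of the patch). */
#include <stdint.h>
#include "Hacl_Streaming_Blake2s_128.h"

static void blake2s_two_digests(uint8_t *part1, uint32_t len1,
                                uint8_t *part2, uint32_t len2,
                                uint8_t digest1[32], uint8_t digest12[32])
{
  Hacl_Streaming_Blake2s_128_blake2s_128_state
  *st = Hacl_Streaming_Blake2s_128_blake2s_128_no_key_create_in();
  Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(st, part1, len1);
  /* Intermediate digest: Blake2s(part1). */
  Hacl_Streaming_Blake2s_128_blake2s_128_no_key_finish(st, digest1);
  Hacl_Streaming_Blake2s_128_blake2s_128_no_key_update(st, part2, len2);
  /* Final digest: Blake2s(part1 || part2). */
  Hacl_Streaming_Blake2s_128_blake2s_128_no_key_finish(st, digest12);
  Hacl_Streaming_Blake2s_128_blake2s_128_no_key_free(st);
}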
diff --git a/src/msvc/Hacl_Streaming_Poly1305_128.c b/src/msvc/Hacl_Streaming_Poly1305_128.c
deleted file mode 100644
index c3f7c19a..00000000
--- a/src/msvc/Hacl_Streaming_Poly1305_128.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Streaming_Poly1305_128.h"
-
-Hacl_Streaming_Poly1305_128_poly1305_128_state
-*Hacl_Streaming_Poly1305_128_create_in(uint8_t *k)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  Lib_IntVector_Intrinsics_vec128
-  *r1 =
-    (Lib_IntVector_Intrinsics_vec128 *)KRML_ALIGNED_MALLOC(16,
-      sizeof (Lib_IntVector_Intrinsics_vec128) * (uint32_t)25U);
-  memset(r1, 0U, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  Lib_IntVector_Intrinsics_vec128 *block_state = r1;
-  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
-  uint8_t *k_0 = k_;
-  Hacl_Streaming_Poly1305_128_poly1305_128_state
-  s =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_0 };
-  Hacl_Streaming_Poly1305_128_poly1305_128_state
-  *p =
-    (Hacl_Streaming_Poly1305_128_poly1305_128_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Poly1305_128_poly1305_128_state
-      ));
-  p[0U] = s;
-  Hacl_Poly1305_128_poly1305_init(block_state, k);
-  return p;
-}
-
-void
-Hacl_Streaming_Poly1305_128_init(uint8_t *k, Hacl_Streaming_Poly1305_128_poly1305_128_state *s)
-{
-  Hacl_Streaming_Poly1305_128_poly1305_128_state scrut = *s;
-  uint8_t *k_ = scrut.p_key;
-  uint8_t *buf = scrut.buf;
-  Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
-  Hacl_Poly1305_128_poly1305_init(block_state, k);
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
-  uint8_t *k_1 = k_;
-  Hacl_Streaming_Poly1305_128_poly1305_128_state
-  tmp =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_1 };
-  s[0U] = tmp;
-}
-
-/**
-0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Poly1305_128_update(
-  Hacl_Streaming_Poly1305_128_poly1305_128_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Poly1305_128_poly1305_128_state s = *p;
-  uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)32U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)32U);
-  }
-  if (len <= (uint32_t)32U - sz)
-  {
-    Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p;
-    Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)32U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_128_poly1305_128_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2,
-          .p_key = k_1
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p;
-    Lib_IntVector_Intrinsics_vec128 *block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)32U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      Hacl_Poly1305_128_poly1305_update(block_state1, (uint32_t)32U, buf);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)32U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)32U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)32U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)32U;
-    uint32_t data1_len = n_blocks * (uint32_t)32U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_Poly1305_128_poly1305_update(block_state1, data1_len, data1);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_128_poly1305_128_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len,
-          .p_key = k_1
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)32U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Poly1305_128_poly1305_128_state s1 = *p;
-    Lib_IntVector_Intrinsics_vec128 *block_state10 = s1.block_state;
-    uint8_t *buf0 = s1.buf;
-    uint64_t total_len10 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)32U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)32U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_128_poly1305_128_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2,
-          .p_key = k_1
-        }
-      );
-    Hacl_Streaming_Poly1305_128_poly1305_128_state s10 = *p;
-    Lib_IntVector_Intrinsics_vec128 *block_state1 = s10.block_state;
-    uint8_t *buf = s10.buf;
-    uint64_t total_len1 = s10.total_len;
-    uint8_t *k_10 = s10.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)32U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)32U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      Hacl_Poly1305_128_poly1305_update(block_state1, (uint32_t)32U, buf);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)32U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)32U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)32U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)32U;
-    uint32_t data1_len = n_blocks * (uint32_t)32U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_Poly1305_128_poly1305_update(block_state1, data1_len, data11);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_128_poly1305_128_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff),
-          .p_key = k_10
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-void
-Hacl_Streaming_Poly1305_128_finish(
-  Hacl_Streaming_Poly1305_128_poly1305_128_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Poly1305_128_poly1305_128_state scrut = *p;
-  Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint8_t *k_ = scrut.p_key;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)32U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)32U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)32U);
-  }
-  uint8_t *buf_1 = buf_;
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 r1[25U] KRML_POST_ALIGN(16) = { 0U };
-  Lib_IntVector_Intrinsics_vec128 *tmp_block_state = r1;
-  memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  uint32_t ite0;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite0 = (uint32_t)16U;
-  }
-  else
-  {
-    ite0 = r % (uint32_t)16U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite0;
-  uint8_t *buf_multi = buf_1;
-  uint32_t ite;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)16U;
-  }
-  else
-  {
-    ite = r % (uint32_t)16U;
-  }
-  Hacl_Poly1305_128_poly1305_update(tmp_block_state, r - ite, buf_multi);
-  uint32_t ite1;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite1 = (uint32_t)16U;
-  }
-  else
-  {
-    ite1 = r % (uint32_t)16U;
-  }
-  KRML_HOST_IGNORE(total_len - (uint64_t)ite1);
-  uint32_t ite2;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite2 = (uint32_t)16U;
-  }
-  else
-  {
-    ite2 = r % (uint32_t)16U;
-  }
-  Hacl_Poly1305_128_poly1305_update(tmp_block_state, ite2, buf_last);
-  KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 tmp[25U] KRML_POST_ALIGN(16) = { 0U };
-  memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec128));
-  Hacl_Poly1305_128_poly1305_finish(dst, k_, tmp);
-}
-
-void Hacl_Streaming_Poly1305_128_free(Hacl_Streaming_Poly1305_128_poly1305_128_state *s)
-{
-  Hacl_Streaming_Poly1305_128_poly1305_128_state scrut = *s;
-  uint8_t *k_ = scrut.p_key;
-  uint8_t *buf = scrut.buf;
-  Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state;
-  KRML_HOST_FREE(k_);
-  KRML_ALIGNED_FREE(block_state);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
-}
-
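A minimal caller sketch for the streaming Poly1305/SIMD128 interface deleted above: a 32-byte one-time key, a 16-byte tag, and an update that fails with MaximumLengthExceeded once the running total would exceed 2^32 - 1 bytes.

/* Usage sketch (not part of the patch). */
#include <stdint.h>
#include "Hacl_Streaming_Poly1305_128.h"

static int poly1305_128_mac(uint8_t key[32], uint8_t *msg, uint32_t msg_len,
                            uint8_t tag[16])
{
  Hacl_Streaming_Poly1305_128_poly1305_128_state
  *st = Hacl_Streaming_Poly1305_128_create_in(key);
  Hacl_Streaming_Types_error_code
  err = Hacl_Streaming_Poly1305_128_update(st, msg, msg_len);
  if (err != Hacl_Streaming_Types_Success)
  {
    Hacl_Streaming_Poly1305_128_free(st);
    return -1;  /* total length limit exceeded */
  }
  Hacl_Streaming_Poly1305_128_finish(st, tag);
  Hacl_Streaming_Poly1305_128_free(st);
  return 0;
}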
diff --git a/src/msvc/Hacl_Streaming_Poly1305_256.c b/src/msvc/Hacl_Streaming_Poly1305_256.c
deleted file mode 100644
index e56275a4..00000000
--- a/src/msvc/Hacl_Streaming_Poly1305_256.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Streaming_Poly1305_256.h"
-
-Hacl_Streaming_Poly1305_256_poly1305_256_state
-*Hacl_Streaming_Poly1305_256_create_in(uint8_t *k)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t));
-  Lib_IntVector_Intrinsics_vec256
-  *r1 =
-    (Lib_IntVector_Intrinsics_vec256 *)KRML_ALIGNED_MALLOC(32,
-      sizeof (Lib_IntVector_Intrinsics_vec256) * (uint32_t)25U);
-  memset(r1, 0U, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Lib_IntVector_Intrinsics_vec256 *block_state = r1;
-  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
-  uint8_t *k_0 = k_;
-  Hacl_Streaming_Poly1305_256_poly1305_256_state
-  s =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_0 };
-  Hacl_Streaming_Poly1305_256_poly1305_256_state
-  *p =
-    (Hacl_Streaming_Poly1305_256_poly1305_256_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Poly1305_256_poly1305_256_state
-      ));
-  p[0U] = s;
-  Hacl_Poly1305_256_poly1305_init(block_state, k);
-  return p;
-}
-
-void
-Hacl_Streaming_Poly1305_256_init(uint8_t *k, Hacl_Streaming_Poly1305_256_poly1305_256_state *s)
-{
-  Hacl_Streaming_Poly1305_256_poly1305_256_state scrut = *s;
-  uint8_t *k_ = scrut.p_key;
-  uint8_t *buf = scrut.buf;
-  Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
-  Hacl_Poly1305_256_poly1305_init(block_state, k);
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
-  uint8_t *k_1 = k_;
-  Hacl_Streaming_Poly1305_256_poly1305_256_state
-  tmp =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_1 };
-  s[0U] = tmp;
-}
-
-/**
-0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Poly1305_256_update(
-  Hacl_Streaming_Poly1305_256_poly1305_256_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Poly1305_256_poly1305_256_state s = *p;
-  uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)64U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
-  }
-  if (len <= (uint32_t)64U - sz)
-  {
-    Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p;
-    Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_256_poly1305_256_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2,
-          .p_key = k_1
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p;
-    Lib_IntVector_Intrinsics_vec256 *block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      Hacl_Poly1305_256_poly1305_update(block_state1, (uint32_t)64U, buf);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)64U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_Poly1305_256_poly1305_update(block_state1, data1_len, data1);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_256_poly1305_256_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len,
-          .p_key = k_1
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)64U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Poly1305_256_poly1305_256_state s1 = *p;
-    Lib_IntVector_Intrinsics_vec256 *block_state10 = s1.block_state;
-    uint8_t *buf0 = s1.buf;
-    uint64_t total_len10 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)64U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_256_poly1305_256_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2,
-          .p_key = k_1
-        }
-      );
-    Hacl_Streaming_Poly1305_256_poly1305_256_state s10 = *p;
-    Lib_IntVector_Intrinsics_vec256 *block_state1 = s10.block_state;
-    uint8_t *buf = s10.buf;
-    uint64_t total_len1 = s10.total_len;
-    uint8_t *k_10 = s10.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)64U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      Hacl_Poly1305_256_poly1305_update(block_state1, (uint32_t)64U, buf);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)64U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)64U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U;
-    uint32_t data1_len = n_blocks * (uint32_t)64U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_Poly1305_256_poly1305_update(block_state1, data1_len, data11);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_256_poly1305_256_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff),
-          .p_key = k_10
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-void
-Hacl_Streaming_Poly1305_256_finish(
-  Hacl_Streaming_Poly1305_256_poly1305_256_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Poly1305_256_poly1305_256_state scrut = *p;
-  Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint8_t *k_ = scrut.p_key;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)64U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U);
-  }
-  uint8_t *buf_1 = buf_;
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 r1[25U] KRML_POST_ALIGN(32) = { 0U };
-  Lib_IntVector_Intrinsics_vec256 *tmp_block_state = r1;
-  memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  uint32_t ite0;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite0 = (uint32_t)16U;
-  }
-  else
-  {
-    ite0 = r % (uint32_t)16U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite0;
-  uint8_t *buf_multi = buf_1;
-  uint32_t ite;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)16U;
-  }
-  else
-  {
-    ite = r % (uint32_t)16U;
-  }
-  Hacl_Poly1305_256_poly1305_update(tmp_block_state, r - ite, buf_multi);
-  uint32_t ite1;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite1 = (uint32_t)16U;
-  }
-  else
-  {
-    ite1 = r % (uint32_t)16U;
-  }
-  KRML_HOST_IGNORE(total_len - (uint64_t)ite1);
-  uint32_t ite2;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite2 = (uint32_t)16U;
-  }
-  else
-  {
-    ite2 = r % (uint32_t)16U;
-  }
-  Hacl_Poly1305_256_poly1305_update(tmp_block_state, ite2, buf_last);
-  KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 tmp[25U] KRML_POST_ALIGN(32) = { 0U };
-  memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (Lib_IntVector_Intrinsics_vec256));
-  Hacl_Poly1305_256_poly1305_finish(dst, k_, tmp);
-}
-
-void Hacl_Streaming_Poly1305_256_free(Hacl_Streaming_Poly1305_256_poly1305_256_state *s)
-{
-  Hacl_Streaming_Poly1305_256_poly1305_256_state scrut = *s;
-  uint8_t *k_ = scrut.p_key;
-  uint8_t *buf = scrut.buf;
-  Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state;
-  KRML_HOST_FREE(k_);
-  KRML_ALIGNED_FREE(block_state);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
-}
-
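The AVX2 variant deleted above has the same shape as the SIMD128 one; Hacl_Streaming_Poly1305_256_init takes the key first and the state second, re-keys it, and resets total_len, so a single allocation can authenticate several messages. The sketch assumes the caller has already verified that the CPU supports the 256-bit vector code before entering this unit.

/* Usage sketch (not part of the patch). */
#include <stdint.h>
#include "Hacl_Streaming_Poly1305_256.h"

static void poly1305_256_two_macs(uint8_t key1[32], uint8_t *m1, uint32_t l1, uint8_t tag1[16],
                                  uint8_t key2[32], uint8_t *m2, uint32_t l2, uint8_t tag2[16])
{
  Hacl_Streaming_Poly1305_256_poly1305_256_state
  *st = Hacl_Streaming_Poly1305_256_create_in(key1);
  Hacl_Streaming_Poly1305_256_update(st, m1, l1);
  Hacl_Streaming_Poly1305_256_finish(st, tag1);
  /* Note the argument order: key first, then the state to re-initialize. */
  Hacl_Streaming_Poly1305_256_init(key2, st);
  Hacl_Streaming_Poly1305_256_update(st, m2, l2);
  Hacl_Streaming_Poly1305_256_finish(st, tag2);
  Hacl_Streaming_Poly1305_256_free(st);
}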
diff --git a/src/msvc/Hacl_Streaming_Poly1305_32.c b/src/msvc/Hacl_Streaming_Poly1305_32.c
deleted file mode 100644
index 249a622f..00000000
--- a/src/msvc/Hacl_Streaming_Poly1305_32.c
+++ /dev/null
@@ -1,308 +0,0 @@
-/* MIT License
- *
- * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation
- * Copyright (c) 2022-2023 HACL* Contributors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include "Hacl_Streaming_Poly1305_32.h"
-
-Hacl_Streaming_Poly1305_32_poly1305_32_state *Hacl_Streaming_Poly1305_32_create_in(uint8_t *k)
-{
-  uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)16U, sizeof (uint8_t));
-  uint64_t *r1 = (uint64_t *)KRML_HOST_CALLOC((uint32_t)25U, sizeof (uint64_t));
-  uint64_t *block_state = r1;
-  uint8_t *k_ = (uint8_t *)KRML_HOST_CALLOC((uint32_t)32U, sizeof (uint8_t));
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
-  uint8_t *k_0 = k_;
-  Hacl_Streaming_Poly1305_32_poly1305_32_state
-  s =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_0 };
-  Hacl_Streaming_Poly1305_32_poly1305_32_state
-  *p =
-    (Hacl_Streaming_Poly1305_32_poly1305_32_state *)KRML_HOST_MALLOC(sizeof (
-        Hacl_Streaming_Poly1305_32_poly1305_32_state
-      ));
-  p[0U] = s;
-  Hacl_Poly1305_32_poly1305_init(block_state, k);
-  return p;
-}
-
-void
-Hacl_Streaming_Poly1305_32_init(uint8_t *k, Hacl_Streaming_Poly1305_32_poly1305_32_state *s)
-{
-  Hacl_Streaming_Poly1305_32_poly1305_32_state scrut = *s;
-  uint8_t *k_ = scrut.p_key;
-  uint8_t *buf = scrut.buf;
-  uint64_t *block_state = scrut.block_state;
-  Hacl_Poly1305_32_poly1305_init(block_state, k);
-  memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t));
-  uint8_t *k_1 = k_;
-  Hacl_Streaming_Poly1305_32_poly1305_32_state
-  tmp =
-    { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U, .p_key = k_1 };
-  s[0U] = tmp;
-}
-
-/**
-0 = success, 1 = max length exceeded
-*/
-Hacl_Streaming_Types_error_code
-Hacl_Streaming_Poly1305_32_update(
-  Hacl_Streaming_Poly1305_32_poly1305_32_state *p,
-  uint8_t *data,
-  uint32_t len
-)
-{
-  Hacl_Streaming_Poly1305_32_poly1305_32_state s = *p;
-  uint64_t total_len = s.total_len;
-  if ((uint64_t)len > (uint64_t)0xffffffffU - total_len)
-  {
-    return Hacl_Streaming_Types_MaximumLengthExceeded;
-  }
-  uint32_t sz;
-  if (total_len % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    sz = (uint32_t)16U;
-  }
-  else
-  {
-    sz = (uint32_t)(total_len % (uint64_t)(uint32_t)16U);
-  }
-  if (len <= (uint32_t)16U - sz)
-  {
-    Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p;
-    uint64_t *block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)16U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U);
-    }
-    uint8_t *buf2 = buf + sz1;
-    memcpy(buf2, data, len * sizeof (uint8_t));
-    uint64_t total_len2 = total_len1 + (uint64_t)len;
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_32_poly1305_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len2,
-          .p_key = k_1
-        }
-      );
-  }
-  else if (sz == (uint32_t)0U)
-  {
-    Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p;
-    uint64_t *block_state1 = s1.block_state;
-    uint8_t *buf = s1.buf;
-    uint64_t total_len1 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)16U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      Hacl_Poly1305_32_poly1305_update(block_state1, (uint32_t)16U, buf);
-    }
-    uint32_t ite;
-    if ((uint64_t)len % (uint64_t)(uint32_t)16U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U)
-    {
-      ite = (uint32_t)16U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)16U);
-    }
-    uint32_t n_blocks = (len - ite) / (uint32_t)16U;
-    uint32_t data1_len = n_blocks * (uint32_t)16U;
-    uint32_t data2_len = len - data1_len;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + data1_len;
-    Hacl_Poly1305_32_poly1305_update(block_state1, data1_len, data1);
-    uint8_t *dst = buf;
-    memcpy(dst, data2, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_32_poly1305_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)len,
-          .p_key = k_1
-        }
-      );
-  }
-  else
-  {
-    uint32_t diff = (uint32_t)16U - sz;
-    uint8_t *data1 = data;
-    uint8_t *data2 = data + diff;
-    Hacl_Streaming_Poly1305_32_poly1305_32_state s1 = *p;
-    uint64_t *block_state10 = s1.block_state;
-    uint8_t *buf0 = s1.buf;
-    uint64_t total_len10 = s1.total_len;
-    uint8_t *k_1 = s1.p_key;
-    uint32_t sz10;
-    if (total_len10 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len10 > (uint64_t)0U)
-    {
-      sz10 = (uint32_t)16U;
-    }
-    else
-    {
-      sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)16U);
-    }
-    uint8_t *buf2 = buf0 + sz10;
-    memcpy(buf2, data1, diff * sizeof (uint8_t));
-    uint64_t total_len2 = total_len10 + (uint64_t)diff;
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_32_poly1305_32_state){
-          .block_state = block_state10,
-          .buf = buf0,
-          .total_len = total_len2,
-          .p_key = k_1
-        }
-      );
-    Hacl_Streaming_Poly1305_32_poly1305_32_state s10 = *p;
-    uint64_t *block_state1 = s10.block_state;
-    uint8_t *buf = s10.buf;
-    uint64_t total_len1 = s10.total_len;
-    uint8_t *k_10 = s10.p_key;
-    uint32_t sz1;
-    if (total_len1 % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len1 > (uint64_t)0U)
-    {
-      sz1 = (uint32_t)16U;
-    }
-    else
-    {
-      sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)16U);
-    }
-    if (!(sz1 == (uint32_t)0U))
-    {
-      Hacl_Poly1305_32_poly1305_update(block_state1, (uint32_t)16U, buf);
-    }
-    uint32_t ite;
-    if
-    (
-      (uint64_t)(len - diff)
-      % (uint64_t)(uint32_t)16U
-      == (uint64_t)0U
-      && (uint64_t)(len - diff) > (uint64_t)0U
-    )
-    {
-      ite = (uint32_t)16U;
-    }
-    else
-    {
-      ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)16U);
-    }
-    uint32_t n_blocks = (len - diff - ite) / (uint32_t)16U;
-    uint32_t data1_len = n_blocks * (uint32_t)16U;
-    uint32_t data2_len = len - diff - data1_len;
-    uint8_t *data11 = data2;
-    uint8_t *data21 = data2 + data1_len;
-    Hacl_Poly1305_32_poly1305_update(block_state1, data1_len, data11);
-    uint8_t *dst = buf;
-    memcpy(dst, data21, data2_len * sizeof (uint8_t));
-    *p
-    =
-      (
-        (Hacl_Streaming_Poly1305_32_poly1305_32_state){
-          .block_state = block_state1,
-          .buf = buf,
-          .total_len = total_len1 + (uint64_t)(len - diff),
-          .p_key = k_10
-        }
-      );
-  }
-  return Hacl_Streaming_Types_Success;
-}
-
-void
-Hacl_Streaming_Poly1305_32_finish(
-  Hacl_Streaming_Poly1305_32_poly1305_32_state *p,
-  uint8_t *dst
-)
-{
-  Hacl_Streaming_Poly1305_32_poly1305_32_state scrut = *p;
-  uint64_t *block_state = scrut.block_state;
-  uint8_t *buf_ = scrut.buf;
-  uint64_t total_len = scrut.total_len;
-  uint8_t *k_ = scrut.p_key;
-  uint32_t r;
-  if (total_len % (uint64_t)(uint32_t)16U == (uint64_t)0U && total_len > (uint64_t)0U)
-  {
-    r = (uint32_t)16U;
-  }
-  else
-  {
-    r = (uint32_t)(total_len % (uint64_t)(uint32_t)16U);
-  }
-  uint8_t *buf_1 = buf_;
-  uint64_t r1[25U] = { 0U };
-  uint64_t *tmp_block_state = r1;
-  memcpy(tmp_block_state, block_state, (uint32_t)25U * sizeof (uint64_t));
-  uint32_t ite;
-  if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U)
-  {
-    ite = (uint32_t)16U;
-  }
-  else
-  {
-    ite = r % (uint32_t)16U;
-  }
-  uint8_t *buf_last = buf_1 + r - ite;
-  uint8_t *buf_multi = buf_1;
-  Hacl_Poly1305_32_poly1305_update(tmp_block_state, (uint32_t)0U, buf_multi);
-  Hacl_Poly1305_32_poly1305_update(tmp_block_state, r, buf_last);
-  uint64_t tmp[25U] = { 0U };
-  memcpy(tmp, tmp_block_state, (uint32_t)25U * sizeof (uint64_t));
-  Hacl_Poly1305_32_poly1305_finish(dst, k_, tmp);
-}
-
-void Hacl_Streaming_Poly1305_32_free(Hacl_Streaming_Poly1305_32_poly1305_32_state *s)
-{
-  Hacl_Streaming_Poly1305_32_poly1305_32_state scrut = *s;
-  uint8_t *k_ = scrut.p_key;
-  uint8_t *buf = scrut.buf;
-  uint64_t *block_state = scrut.block_state;
-  KRML_HOST_FREE(k_);
-  KRML_HOST_FREE(block_state);
-  KRML_HOST_FREE(buf);
-  KRML_HOST_FREE(s);
-}
-
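A minimal caller sketch for the portable streaming Poly1305 deleted above. The update path buffers partial 16-byte blocks internally, so the resulting tag does not depend on how the caller splits the input.

/* Usage sketch (not part of the patch). */
#include <stdint.h>
#include "Hacl_Streaming_Poly1305_32.h"

static void poly1305_32_mac_in_chunks(uint8_t key[32], uint8_t *msg, uint32_t msg_len,
                                      uint32_t chunk, uint8_t tag[16])
{
  Hacl_Streaming_Poly1305_32_poly1305_32_state
  *st = Hacl_Streaming_Poly1305_32_create_in(key);
  if (chunk == 0U)
  {
    chunk = 16U;  /* guard against a zero chunk size */
  }
  uint32_t off = 0U;
  while (off < msg_len)
  {
    uint32_t n = msg_len - off < chunk ? msg_len - off : chunk;
    /* Any split is fine: the state buffers partial blocks itself. */
    Hacl_Streaming_Poly1305_32_update(st, msg + off, n);
    off = off + n;
  }
  Hacl_Streaming_Poly1305_32_finish(st, tag);
  Hacl_Streaming_Poly1305_32_free(st);
}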
diff --git a/src/wasm/EverCrypt_Hash.wasm b/src/wasm/EverCrypt_Hash.wasm
index 8fdc7b27e028cff8b53492b41b29a176bc2bb191..ec97a1867d4064e1a5e6d1eb078c5bd745a26b89 100644
GIT binary patch
delta 1425
zcmaJ<OHUI~6u!5eP9HO^EsuZ#?bJdF#Q>dYkp&t{Ou&em$Pc)g!axg!s#8L616{gM
zG$$He7!zWGOK=nxD$2sd#E__4Bm4w*#*L3oAKXp{*`4n@-#Op8=bj&V>yx~7y~~2%
zYZ1#Nb~TO2;~6p{t0{P94Y-;q+_ehI$wNrSMiL?+WJ<}Alqkj!=P~B~Cyn<b{&*=&
zGp#3T%Hb)~=?hYap^QAEh_KJ}9KnWdtDbhMEldjEZDG$TN?o}ahbGovs~ToIYpFbY
z+3%x;C12HF#lQjEXqF@mlCVAGsd|xaPwZ`Q-sN@A-<Xhd3dzW74z+@hX>{nz*VhX>
z_6`sjmJ+{eiC#yIV$v~eo<(h|9`}o*!7ND{Bz9-0nI3{Zi$v6$^NMm?>4Svx+qiL+
zB&AQBom13QHix>5zZ-3#=7mO4qc~y|i_{esU4iOrTr<MYMtI-~EYaHX#3@-K(m)LL
z8cn1lh4H9}`l|k@{L^GCJv$K<2g39fHJQ?b7Sc<(6q!*LQPg}c0dsM2Wee$Yjc77X
z7LCPtzsdM8Q!y_0kR+3F(qx=87HPjZKenyZbM}WIxVv!>M%^vWSvix*j-x?AN6aln
zRp5bp);W=$QVK^p8qjI&v}ZMj-)J9)2*F>qj@3Mi545!xP6xgV8|y!vmc$1%7xW4i
zSl<ZoI9M-Ogu0nvgMeXw;}1sK-ph0NcvU~#dG!e+xc+)KBH%*5LJMmGC6HIL8F7rG
z1YFvTg$rrnP#x8i6pBT05ySJ%vx8U{de87SPAG-lJW~m;^43atjki?72#?DlG;R6o
z?)j$M8IF4d%gYF(5aL#};jJATTiPS|-rc8R7RcL1=-KAk(mLV7_TzG>?QQ?X{{RB_
BxlaH9

delta 1439
zcmah|O-vJE5dFUGZcEv%ltMwUKU)g{iY9bh&_p;CO{f9Ugv5g$zHIA43#CF^O!Oio
zMB~k5Br2LHoVaMvRk$FLn->l7VvO--Jehcsi$vVrZrL`}dVBL`=5@ZeuMg);U*}A#
z9Tfg82dGh8<BE#$6p4!21UY>*Bf+A{%ezZCG7^mh2_a)rj*vtbs)i>Pr|0bCR7&j#
zWI#$wnPgm_NC-k0vFb?efe~=iE_iM+m+3_R*~!xFH~@2WGd!b@>*xo)<Y?Ef)Gio_
zizzV^f=5i7(`Oui?o2mHNn>LCI&3qZatpgmdpTM#4_>w#I{hheLK0duB9F5qB#@X!
zXUWXe6)`7~$(d9xiJB_z#k4*6VD>wCL%-<GJ+r%^=mnbd`V%@WZbz%ORPyGFmez9g
zo$~ZXdHSbi;L?G^{r`DzO3dca(TZ#Odv8_w8yhsWLL@>YJ3S*wHze4q`sivZ=LJb9
zp+p^Evv@o67UY@0A!Ax~BA?)qTt*Tnlj$+ov3m7X8=SY=%Q)0Ei0-55eY9BZy{UE8
zJS&$a#1QEY3aHCz{8Hh&2ZfV|HMWQ-Bmo5vn9*1=YlH>~rP@PEz+yz`?kN`62wgme
zLKVgHPHTyb$y=-kvkP^s<^KHaEOPq6Zac!Nb}D~|G&<Fy)?8dMOo?qWje5N*VNRx#
zIn)O=HWy@U!`4xyRzARx)|9{5mcw{czTHm<^~na~E6wnw%?7L2Sh;z5*o=S4$rWoA
z2FR}NSaIYnO8e|QhVQbUL%up5^NtB$J&$1N#UD(`Pha-o%2Pcs`05>|p#JsyfTE@d
zn0P9JBFG+MO6xFd5Wud5+ai3?bdV`21!Y)o!|2+HUP_V0_G28w@_E?F(FHinnF{bc
zM-^az!#ae|YaYkF+VK{eWgmlLG)8{JF38sPEylX<E^KeyEvhc8&%@>hXU@9|yKf%o
KP+r{J!+!xLKff0M

diff --git a/src/wasm/Hacl_Chacha20Poly1305_32.wasm b/src/wasm/Hacl_AEAD_Chacha20Poly1305.wasm
similarity index 78%
rename from src/wasm/Hacl_Chacha20Poly1305_32.wasm
rename to src/wasm/Hacl_AEAD_Chacha20Poly1305.wasm
index eb45d058f670de2e077ee014b939dcfe4bf2427d..560e70a8ffd9bc1dc2e25c53a6dce148d4632877 100644
GIT binary patch
delta 535
zcmaE9{nUDbyewlqV|@Z+9aBA!1T*0TbAA0p8#~7H6RQKb5_2-s^Wx1+7#JtpGAb}K
zP3)IvF*aggo*c)h$;dLfgHeu=6(nY8#=tgtIiofs`@|oLsvLqIiOD(fzK+iE0r@$V
zhQ<b_iWnl3zcS{#U2kY;U|?1POFFtby2LwYBqnDh8X3U!#i!;a7gZLNF!170G<gHl
z7IAhdM+OZB21iy$#vDhE955$KfN^s*^BZPdl9TJ$lNs433yO#`GH(9Cp2-U1Os?R4
zj7!tzB0hCixTGYib(1#;C%_EZtS&N#39b;`UAM$ES;UzY7!;Tk*c8|u83nkc91k!k
z@+$BOuul$<5o2VWY$%;50g__?$|>+G3MdFDuqv=9fHW~qUL~yyvk0g#h7BYQ*O@Mp
pD8{P52GJ>`D6Ak1*2e_YcTYwVX!H-6J)BG+bATqYP2MZ30{{t{cc=gW

delta 590
zcmaEA{nC1ZJTr5B{X{Xji4InbXD3z%Oy+0gWMrJ!FVA9V#=tarBcmcC^TdA1iKjUk
zStedpU}T*v$*8Ho#vPtmoEuzPP>^3#B7nq>FDXhaU|^rz%BZc(A>fgioD(0ApHpdQ
zY+xF1Y@~oLF!?)UzR%r;h6V;^Rj{0MMq+YCqLBfTruf9v#FY5dyyT+Ff)WN^{OTsJ
zVcNpTF*%W0l#yj~IrAH47-w=7d$Kr(lp})%1B0W0BV!H-fjC(LEStZuXR_jwoLs{D
z7-q=kJU(@VLP?Mj9B@-6uMtkbWyoe_kvU9oQ?7|=vgolWuqm)AFeorOG74}>IUZn8
z<W=AmfVuDh1F91z>q;j|fD|(T6)W&73MdFDFexxAfJ|YZyi8gbm(4(pF|r`#xQ$Mh
qNi={PEu<){APhH}Uw{W1WI$&k2ifE=GJ7~#fF582d13NSSseiP;Dh1-

diff --git a/src/wasm/Hacl_AEAD_Chacha20Poly1305_Simd128.wasm b/src/wasm/Hacl_AEAD_Chacha20Poly1305_Simd128.wasm
new file mode 100644
index 0000000000000000000000000000000000000000..a647522149a9d0384bb143e836cc6786352c2a7c
GIT binary patch
literal 1910
zcmc&!OK;pZ5FSeVl=h+ZE3q9n!{m|_s1ZAjQTP-hZO|Y+6bR5B6<VTWt@Vne4d5Is
z_x&4s?XCa9rxyJmMf)?_p|no5ja&HCLW|`L`Ell(k3)jYBL)EM*R!)T7(HjF?DRP+
zS=szMHOd8FFPmklhYk(3_0~kMUaDXX+c^J<G7Y*<WhNsPnvseOaO}^C$g^3RCRwr4
zu-yYyB%#`SAK)J$G7MhCE<somveb+c$uLYjFq(i%(%jz#vkBR3#<*bOrqVlBKD3%h
zr->{wnL@j{d`YgIJ?JzcTg?dGk@Ak+gW2Js%G^A7t)Sb)E;q3x;A@p7(7XGw^g{R1
zk@Sw_?mhQ2<?ZZ#;J!7v`(W>KByFpAs@Tg;(!!<f(7!k+%a?_8^Fn5YS%UeE?w6Kj
z4~CuZw8=^k=Z4+=T|4HxRaWtl`0x#%Pm}Ou=l;D1?z15BHL-aX7+eMHtBMi~`!x-c
z#AMcQ-)a&+>6C_X+ARg#0pML^!ijlL4=L}X!7VhfC!A0){hM<8HVBwHZNRPo-sT-_
zV`q<A?8kYi);yFY)+_K2tU_Ca?oq@Z_BHyKFBTII&_tkA9SX~w{)RXQKE$EZ-KdC>
zTN9|fslzCm#cj0Erf7VGBLVvWI27!d56y>Uz|+>l3XLN@9Zqrd9yCA;5eBTyY#T>>
zOqp;@pC$rC1jpP#hl*@f44GpSBOEO3w!i<nu4Zjs`#?Ag?4AFPyk>hv)h0NpRU>-G
z+#%JRhH4Y?v}%*NYBgw~?zw6czNA%KqK|r^R&9wd<8q~$fego$YSdtY{hJaixFY^w
z`W#pIDrH(DN^8P-a&eGj@`w@<;@7bImvbGj;q{!hzU1qo#PwXTY2|I;I&Nf@xrv*i
z`>HZ;;40prlC^`Y6z!$9bXp2O<!hJKs9}5yw+Q)Iqgy34D^O{=Vrp3aj)1mcHou8C
z|K1(;&tFn|Ft`o&xuI|O_M`c8@#bRFrQS|2Zh0zwU-|k8*+(yl(jb&Yki@96EXlB#
zWwG+HNHCklMGz@GltDPll=ZkC@7#)lJP+a*Se&E^gBU}3lFSN>WV(-^1P9ws;(}hd
cBFVIs1#upD`Sz>o&AW9MuE5Yw{x^1i0xT2S*8l(j

literal 0
HcmV?d00001

diff --git a/src/wasm/Hacl_AEAD_Chacha20Poly1305_Simd256.wasm b/src/wasm/Hacl_AEAD_Chacha20Poly1305_Simd256.wasm
new file mode 100644
index 0000000000000000000000000000000000000000..804858db6a6f1d2e5bcd87ce4d19d24b3ee151b1
GIT binary patch
literal 1910
zcmc&!&2HO95S}F^Q6lw2`7f~@H?zqlDG(!e5~J`bi?l(5^iUu`dt_*G9aH9CNGgDH
zFx@6?pCQ-W`W8O5=zA3HW3;oRoKm%0_|$@ec4qh6nQy+GU2yY=0Ra2;<m3c~&)G3M
ze$Gl(Hvf*ba?V%FdReNWLqlzyo9Na{6|7-vr*9}z-+1a~ZX`oJl92|s^)(TBHcitc
zE0!9zvnPurlsoqU{t@Da!Hd`-2x~$Xno%M*3=<EuCg6fJ_qIWALe`rx&X_o<>m5oT
zT1}+gL>8HwLc6(qL9VSG=rkc4%?RFs^bVc9>HfaVoIH3f!DwO^n^+R?wagOe-u~S6
zLg&$e>m9h;cb%`Kx3&F=b8d3`!OnRkO**u#-YH`*J4y?OCZTtBP^Rw|u9FvTR_G<@
z->H6SnO1+$`9Ybi1aYca#_!6}Kd!Qh&%~!6_<Wg!M_c#qK5(7|k+0ItvOxbbU|&|0
zV9={+kR&=YfBRmM_(`SIv}2<ba0`GN$b=p9t{PI_LyeoLVOQ9pVCpyF)=dyFvD<)#
z0N&;uY-4AKTI|PJsMai$DOM}+dsd-MLU$=*7kdi*%Qy3h`=}#Ost$!^c5h9b0w3VO
zHr6U)<kkc#Z(=iwW^xNnv?v-M;!waY01gB@<OBU78Su3AutMWdO@|X4-hl>aE<&HR
znPuUSk0=w4=u<?1h~S9ZXj74`iXpQtVuby<-PXIebu}xq+WW$uV{iX=<Q3aXsy4=P
zts2og<Tj~hH&h#wr&JryRI5RAb<b2A^97~a0)5mAwQ37|5f>}XG-NocRHFtH?BA4F
z!X@zsQ|GwEmnqW<QCbo9le2>ykw=t>5Wj-PU(Qv$f>(3e`jW4T5?6D<CY85_tGJd`
z<~ptm<5gu|!)3fiB`XJ)DcVbAsk9V+!dEV;QNj2IZV>XZLbpn4R-n>U#niC;BLQu}
zEPfrY|Ghh`pTDK{U~m(xQ%&9O%}2B6;={$JOTC$%-SX7+ed((wWEZ_8N`ufXf+R+n
zWl4s`G>fH=MS|HhE`mtnz8i$oOq!4D@z#wf$nzk6fyGfOF^DmAkCJJDk(=(~7s1}<
hlenN4u1GRvWkH+=UcUKCdX(|TZMXzOKl$I-{Rv$m+Uo!S

literal 0
HcmV?d00001

diff --git a/src/wasm/Hacl_Chacha20_Vec128_Hacl_Chacha20_Vec256.wasm b/src/wasm/Hacl_Chacha20_Vec128_Hacl_Chacha20_Vec256.wasm
new file mode 100644
index 0000000000000000000000000000000000000000..ee83c846c1548d2ab3c2e5bc9a49a3570bd64588
GIT binary patch
literal 1845
zcmd6mzmL-}6vv;FCTSBmDqurM&fRjiR9p`#bX$dlK(Hafm~&nCYA%T*$3f~1O~nF#
z17d`e7#JA%OPCp1koX^P`s3v40ul_+CH8y%y!XEH*8!3z2msL6*=z<^j?ffMk5Gw9
z^E0)|wYWOA%krFB?p>YwP1gGUQjZ9{M?{i@#dg9H3);amt>8(KWkRa!#y%P_C1TdQ
z4e(2Z#QZpoG=fazs@WwWaV#iUCQvixpbNH%+%S8rltdY!Cu|4}lWd!$k|YDiJg%9w
z-vie~ZkiqFn9-AHP>e=QMmaxa;F;_-lhqe^%A|ni>Pr0~Q6$P0k;*Q?epOlOw*27I
z<t=v}pc49LUf_MJPVBu0gvQamF`;A9y%jxWw0ox)>0lAFl*&n_BAtZbBD>$cdy&;i
z*lI0gXz@1k-&Ur(@2Y{8HMPq=>;i<&4)o!9+X3wKA+*B=wsE7Ut^TwG-6iN@6gDx0
zt$sW7b<rC%ukv)26S~;JZdLVRS=Crp`I-(i{n?t+zNVXD3pa7AqTknu`C|W#njein
zQS;iZQxkmD)&PPX2+q99k<QAI&ccz-P!xlhMN$;$uyW%NQ;}pmCW?y`Gbx3{s*oug
zVkNLFQpFR7M})_PWd8ldy1Sk5Jm={#R+Efjp5mBHM4@m(vP1ls4>}K1r3We@tDWR&
z&S~CxF@F@>dr+(F{H;(S*@j9pp7CZ}oc>4FE!ByCtV~a=v$RsSZs}%9TUq+Q)cpkR
Cjzv`f

literal 0
HcmV?d00001

diff --git a/src/wasm/Hacl_Ed25519.wasm b/src/wasm/Hacl_Ed25519.wasm
index 57ca4d366a2d9d8f5a81677d96a5d09bdce14907..5fa25fad0894e986be94dc32c9424207c287ba74 100644
GIT binary patch
delta 3082
zcmaKue^gY*702H^c41*rHYx&wIB$_CCap$~=^r(f)fh;SNCX4x4;BI<!XhXnkQxE4
z#z;(}d5O<jNTTt_IU3vO)^3uTh_y)@D@tt8)}K_NA=+b=^fdJJB=&yq!Xs{vIcN8N
zXFhXh=FXjYZ(ieNOZO$qrZFKQ4Go(|CLTyl6T;A65LtSH8ZLxhsXB$Kf23+6L_Z>&
zPK!Oau(~4q>8h&A6*a@YQ}$F8dcBoJidDZ?O%b7bmYOKS^jFpUe3#X;YE)c$VUgF9
zURYh~$xe4o*4wNJ5ra(cvuaZZtL)+_Ub<v)b&Y501Cy2Lp?7wVF{{43&=z&-zfY`f
zRK|9%ZfsOkwLYxDrHoe;EzA|6*`&>38^v{3giH0hY+kOEIV}3buVeJ?$c=hi_#|P|
z9lg=QqRZh6^va0ZJL-9M)Z6WW_89%tkS2Y7B>DtcV4TBdT@HuO;h4M48)$Fx&5Y_$
zO(%;>+z}iCX&Tl*gNmCpu}=;cHq$vV$a&(|Zr|qk=R*g)X&LK$K4+pJ!0Yn$B!vsJ
zd({7T*9((ZhS;<b#&)Y1LDT&!`FJFCJ~BD5=yE-E%4oBHP^`Y1DZd<CNbuma5Z`ML
z)>>)M$Z1J}WYR+{IX_{ko6gdl%F^m$>6*b}dzj_^Su9zPj7Rrp(<VdSNoTqID9f-6
zmRYk|>M~jOJ<f7z4$D1Husl4Mr8b*oZw|}Fc`S}xh(0r~m8`|xM)tJ(6xki~KPOwe
z0OhoWXUHZl0{LnY$gzBoZ64U=1s!D53elSI6v$U>j~Ai5y%@G`@d>iCN>-6QR<efd
zBTGIZJERok#Zr)=OF=r8f@GJ0oGAloDF=!7g1q4cSzG~fwc<Il1<P*3R)UYK0^d~y
z?pzM;Tmk;&3h*V>;9b?=T{YnQo(4~O27LE3;Ja(VE9$_n*Ae%XKD%Ef8Ts5aY(e%a
zmQD36Z4E3Dt69?4u&n(V%h9zg->zf%@p_iJ7g!E$VCijQnYa<6dtW+0Ht}Ul-Td+{
zvNK*m>f$ybq|Qy?`I`~y`OV-5Uq!$Nb(A-}23zntJjZQ;=dLaAbQ<9QVcWhH{_Q^p
z*|QC{{ui+Gn$atv1--VlpjXTr;5XQQ{3goHZ=rAVc9g4kz&`dithSS^uWx6j)qHa3
z!PF1!y+-)0eSC(0{|-yfewN5zu}pfGCFcN3-9eUSA4}`6S-K9f*!(Q_zsHjGK1<C9
zEL(p&4x7??_yI`ohkT-wkFeymvb=bd<)h!R+&acG_G6aZPat~JA8__F{)n?5_vuNp
zJ)h!?zjqwv^?&+|tosB`_Yd1ZzG(wFbrK2KeF}VKJNz>~196_l)6jPsyzS2*J3q(S
zU*3TP|FZ)d`}!HAeBoJ;b7ztA+`k~fxqk&uJO|!<4m|EWc-#f>&I{oA7s1b81V4WX
ze8**Q*$Lj#3EuKI@Zu}rSFV72u7Z2I!0+t_-`NeW{T*EU0=(x7@RA<zt3BXXzXacV
z4Sf7{;=Wzi7g)aY$}RV;x%ra$%EeXUTit6|?)KWG`{%>hfjgpA4SFYA`U(by_O0y8
z3o$F#{<~-JWgGM+ra9fZxLssAue`mHHq~zvVmdTk6)tE~h=^7PoczDBiWOmIlnHkR
zV%$m)ZvWOH;u~QG+h`5)*&ij$X#HayV*B9W7lw)3$O+$K{@xLy*=*yXjc;QpCYzBO
zD@F>-mK5PO-i;MuL~f1}bom%3V?_nYv^en)$xq`bkO6Pwl~I^?z%w+SJ!|6O=|31R
z=(^~?!Z1~#8GoyS`00nw1o{&2myH%h2#c>fqif9f`7V-Ty14{wvN1GCOtLCcxTG;N
zNtE1unxjb~0wM90Y+M=3Nefim$)dpA0vQxud$Jg9_VZiDiLSxD=nPs5WpI_aVbrIH
zG12x0Ykh40$B@l6LJODSj{cnp<X)Q~V(8Y7>c91ktO;UmtWE3xUJ44s_{RjXj=Yog
zduSUi{tfqu8)CSH#<#i@?J<{C%a9okn>HKmb1cL{wJ;ef!ycq>u}qmcL-~=S5Pe)`
zxJ0>@LB%;CsZ>!iEUCN|GLvdHZ<)+YQTm;;`kjqiKM*4uo{*W^Tsc>2lF}_z#}qAF
z&Lv+h2V&FaK^)qXkXS7jBDFl3w?}hJsm+)3WlkW51yZ`g0$DE87RrTkUVtwmK6HTR
z%Y6A{fP08XyQcD~DbosMfy@mw6jB2}F@nN&(@JSoPtm`K(p@ZzDIJSBxxx7;ktGB9
zSR#v&k7DXoO8Fo)^HD-I<fFvQ$I^ah&c|$O(aL4HbV@1hj)|I=%F&JqT7|5zYs=&^
g+8Dc5DJxm4WR<khB!jzRw?E%VpUgr@QtZnA0IO79ng9R*

delta 3006
zcmZ{m3vg7`8OQhB>}K;M+mJMYfSemhYXX`;EG<(oyXj~ihJZ#w9>mR(Y&LH;n`mJ~
zqGIp`fq0Qyi%7S%!$5%%ud?C`S`kGOG(5!eFhE7H3~jMPX=N(?{@*4Wl9|rH_dDlz
zzw@}?y}4&Ed})4sz`Sl!r05i#)nbLx+0&N1%bq91PVE^nU$ZNbLTH7G6pFT2q>3=D
zNlBOH=!LHKmXe3P-X*>c+bwNphtK6TStxaqdHPIKxb}uJRaiBPq8bCr2_;v}chxjI
z3tGI*&XRn`3}>FZ)$MDjaXOtIcZak7C(~z`#*X2fd{=ut=iJ8Kk`ABS)zZ-F(Joum
zoKY?~S6xGE1C7BPiKFJ2FmevV5xz+g-)RF86O*jt9j4n&CP$P*X?8?3bEnZJG&Lnj
zI~=oG+Z~xjH}F|hB9)=%`*N)`YT1Z>PBi-d7+m+q`ZaG1@`L^09Pftu{@A1ccfH_P
zvym0ouY{7!4l!a=*g#wS6EvYn9ompLb(+yVcDXe)=(drA3^ipS{MHeMH?>fZ;CnLq
zCr66V(72I9wX0K(g;~`E{k3?JAcS!y?UNW9n7M6u?RYIcd$@?B_G?sS?;O6F(Ua57
z7>~|eW}%6%{d}?@jGM!lb2r14!|1XzwmKMnxs0p7U?kqdn04>%7``}f2Jm7&<KqIx
zjY7tqMGV(m#`<E$NAnop&1c;40HeBup*_eruz>NOQb3zlwvXbKvObFMlz&R``HEu{
zt1HpYTJ$+ZYZc7tDwypL!K`#5R#YFSnB~G%%VL<*9E}>ZpQ}Z5xqB(zS@#&l?R8I5
z%=8?f_%9DkM?K8-dYJbcU}iVM>}`Zu)dXX1hIzUfrnm*Bw`DoSg4UaeOW<u@_--$J
zbQ^rM5B`V`ettWAUpstX2mJbn;o~2HU;7CB+F!z#E`>j~l)Q2OvK<QH=A*Om2xdRV
zShj+(yOZ(H#~E2qGFnzLwmrqT*u|LgG{f}_<CSL_r=DXZuLiUQYj#nzc4O(T?%z|K
zx)!BV)?rHr*TLuYET_1?2Y%xV*ziUTZQpvtf)|l#+knjO4akhv;m>p2^DE@<c?o9S
zuMs^P5$}E(qb!>+YUL&x^};LACphlhjCR*=Ftp2$_M+b+X8#T`>Q#!yxmV{}LVrAJ
zN`<z0+c%`2{R6+kJGL|WcQCH)WQ==_F=rRU^*W==U~GMZ(f20f>TX8jTZ~zI7*%gG
zo_OcScqUuloerFOk6-99dl`;>jE+Atw)}~4@&iWfhYb4xKx_X9ul`*h<JDIV9;VoT
z5U;rZ6STdD`YGo2;^j{2gZa7-=B>jhz}ipYYd=H&UHvf8NASmR?g;$uqcCfZ;nlA=
zj{1Ia9M5&d=cxESCtyA}f#+oZ0tL4J8QyvlzUw5satdBK4S(=7eBK%O{b%6!e+mEm
z0DR<G_*G}&SDk~OdmjGKdHDRlz~_GjA9n$M%?0?Vui>LE!uMZ<pLYqq_Y!>XH}KkD
z;eT+MywQER(tPU|SM#RE8%_UM6Z-1nBr)p0eaRg9@|s6|dDRs9(qFGl9y*@(DWeju
zT(2Hl!l?asSy-rZ^v0IqZ|>-6d%nkM#D|Abdvi5C*f+0=9715aA{=0Qm`GHHasrzz
z!Z&=fOc=QbeOJ1u46KY5H|WOn&N%TyA@mJ#BDAqUtWEfbC;Kp7P`w7151dL6FNdyS
zstWy5lCaZEuJJsRJsC6U{$xSNjUGrAErhgjVh*8ZocQl>=F;)EX8wIVXI7;kGq5p5
z(BUw6XknT1NXI^t8Y5$DRy|W<r9jC<QG>1U!KU|35-ys1@Y=mn<b<|~=cZpv6*DZt
zA*D&5Hd)jSr*4}pqOcSntoorTT!>&NH;s3=Hv{()*pntEh7to;ek8sUp$C1|gd)>F
z{3@XzB080!T+dAxQ^dErCtXZRjOnzjNbcn8YJ?*}jg*nL!J^{+ymVs64W8KgtPJrK
zZSegJuITX$(M4C&wEn0B+J<kc_?t*l3uU31rn5Lq>U|E2S|p2XR&_3N<_U7)s#S(d
z>r9hLEtbW(rk~P}C6hW&7CA(dT13tHVRq`6?laqYPqLVL5zURVINdaueE(pw?oSY7
zX({yqS*(`G5~)g)6jHHes}IT&N>>*E5o#%5Qx^isY8fEaa#_Abt&mc!l$CNpa2t!H
zbXbE0YgDUbl`IYVhsci^@=ocL3xmE|%AYwBhc39JOO^#MET#ecdJEcl1>G#|vWEWG
zQi0vlO%<u*nvO1sM|y@zQZL;oiJL|>P)UfPl6a_xl6XQTX&g-El4R46Ch1X|WwT6`
lQbyY{)fQ?e+A`Eu*&3}bkxS_DM5|uuWwyySy&zLe_#aAeQs)2w

diff --git a/src/wasm/Hacl_HMAC.wasm b/src/wasm/Hacl_HMAC.wasm
index 033e7523a05ee2ad4575a4696b36b47f7cef8808..c2e51b851c0e3a3f21be3160b08d2c2ff85f9351 100644
GIT binary patch
delta 2779
zcmcgtZA?>V6u$4J&_cUfq<l*4y&?wVhG|8_4RE)73sfC~fXq^$&_W9c1y^TQ)Zpfn
z0edsEIW}XKW$}mEl5Pn~Hi68T&5yaov1Hkn{n!$=m>EMdVOg@Bb8D|9S9BlQlIF>I
zpYxv6^PF?<xu<?2=V!@C!kXt;zrT_A$paw5?;p+mk-c$@rV$H`WetSTDYA;N^asMx
zNs>ahktoFmGozsE*_9*`oXH|(%4VXdm0d@a^c3MURoYT}v)fi`?{(U$N(Ix3#YdYH
z9bKJ{UT2q=iKhEmBfY|=#w1DC=pB}eNxXMl6JcDIEFCeYnKneEYWYPReK8_kyLvG-
z6_F91X#PHlG8M8!4GLKzs#uihQ-!Remm_6)G9%M>FD}Sz>vh^Un#{Jo?iRb(Ve4#i
zcQtqPGKsPG>bAJ;9S(D^QN{!tx6kEu+BzIQCW#)4THz1t!QDWc&DQ4d+FboR?M-fn
zt%^yOCpXBG->t}P@AWb%@`#o9kH`fPo<6tN#jKFUG|411EtWu!Mj0M1z`_{hsbvD1
zLM;^qtdvK{1Oy{!j`DHc#KD09Mwt+lSSb|HqslMgVLsRx4-fy;o<^!QMAPq7o>(nM
zI93;}PT^vBJ*U@&?leAeUb=6}{eaHL#6hG+JqCD2Z2`Qgejjd})+B&Et~p8+0m~z&
zSczI#hSXu*FsoK%GYsbwNIUPNJJ#r_DeifQITu$B7^(k43K`9cqij5e9gklYA<==P
zF`%|5&$C29%Tlf22-nenSA*su8YU8zDFTzhFoH@TZb9J|5A8ZOU^S`;l(LIMStq#@
z@CPOy8&$#&62)6rdGuaeh2Ge-S0wL;X@fy&wJ5DtlxAOjGORooeKtMRV%^^>PbHN{
zUG_TKkY}J78TC-Xjl7M3G5Ixs_WU%!!F*&U@;3p_iT2tpXxl}6O~lW)yi3=w%LDeK
zCs79x+G)z87(FcD-Ah6>puvKSU_r)b$p(B+v}Z-zSb$88h(iVAU>6r+)L0=#O%|f}
zj)<8>Xgfq4D>|FDT%96JGJFr$qYKSolEERs4<`acpd+q2da<~QUNU_Pu?r;zI&IDb
z>$dqc;B09w;PnlA0M}=&20T-S%xD(kH4z)PqWy-5UyGPneg&A}Y>fIP8>5^Rc!%$d
zqvI88!5Z3xXJaK|L*>Wxa*ifxsS*RXavDh3bZ3@x!M8{MFGsr%{`{Yg^(MrIMmhxc
ze?QLgB^!h@3|uVUfUnC!Nd@3-^8q?tnhw_gGr;Rh*LEvsnnrCK2%8@+!Tv<8z~KrM
z7=h6xJ#dhFcT7O^_to(r$7-w#j6Jm-ABP|@uBoR_@3ev|G>vAZj<<4F*r>r9UDe&v
z?O&_Cl6|no2e!Cy0<pVV2~h=}sojq~oU1DW++2S^x_hPGFWF7Ik4W|(yI+uUyxDL9
za?p)?`hW^8%9&(N3$LK0H1#hi9rb|HfXjXsRf-4tShEKzdcC<DP}72oWLt|%@_yEW
z-aJQ+#2j}Z^P3}AVk%panP|loGuMjiqpS_rho=phx7%=Wd?VU=C)(vA9uo1K^TLwz
z4UD*)*t&)GPq1~3TlMrS_hztSJ=n5f`R{mq!0?@d#P~Xqnd>Z*n5r&hu6E%@i0Q`7
z;p)cC@lrQ3mqk?VMY~AELn2-f@%OzmOBeWa&#T0s;-UqHSMn-O$*JJ8s8$~tfG;&+
I(ik=KC*g<^Z~y=R

delta 2871
zcmcIleN0nV6o2=%mI|l^%7@UxtH3~O=4=H&0@_zZV2B9;iN(RA)I!T%0ZYZWY^%h$
z&17ThRks+2Glsb(eq?kehAhrF|IlU0$jr?Cn5fIPFlYAlk4xr~opakh3Z-!$F=>7|
z?{|Oa+;h*l_jG)Uocxg-%z7$Ff*k}w6$~miBgo5qFtSDtReaBny-o{>oo4bGywyU7
zNFKdQH0qV*98b6LCZeMI$P%Ka?-7B_qYko^Xy|>CPm<_z-b}Rg3}4VLyStsPJ(9;c
z;Nj*ktaZ9%sTORhsa9NRn)xY3w7dFuI0J4;b`Au%<XJApX_u-#znebbg+&I&H3j@`
zr_bBlO|w)5v_qxYQ0MEHaUY9R<L-6)y)H?Ty4?ZEV<{`=7R(H6DGmogb(6~}JN;JL
zt13<}ji*nkN(*Lf8QulYC|TF6;&il8m6;Z|Jtb;G`W*Y1)pPFCt}cx`+Ceot+8nVj
z&mDVe?$~+r7Mpd`rL@b==iQcUMuhDF$!d{089l(|P@5)?ETf_(HyEj*cd*&nDZ8a6
zZgE_k!US=o*v96@xfMOe&9_Sqc+Fg1oMUX>ozr=D()Ai+IBR)U)^^Sq7bQ09*eEly
z&X048%{uHxztB`?Ume=JmrIJ=)&{XEa<^$w(pT^>NPZ?f8=dx68WDJ5p>BRku8?ff
z3wm92%_)s5y2<(1L479q1mMOLJK(D+ALVFsMXr?N#0gF$vZ$8XSC39jN19scxzrVe
zgl?yv<P+<TEII(1eBGNoA@qFuD8y~fs0VyE<IBWeLv<PS9X)!MWG+%A)@{i-0h*t3
z{@_zdImZb%iAZ`(Hz~=^q76klnrSd92h?f3bj;w36eIO9#kfeHDbZv{kFT9|`pHHI
zeOO_nQo&(3%I^hOt}@}6q8CsPKdc>A>sw(zA*Vei0y^&!RG(dPG@*K1A*ktO;iZ_|
zJLrPq!AS19f1kT{I+u}(LHcP)E9B5uSq3;-*#h{evJg;Mk4($@)qumyzQ*iF3|DPH
zUfytlezvR<EQbwGa>CX_FPhPNnTYFY%eMkv5|R0aF?xGB*!6Z?-N)>4hQBc^t-1vK
zy(;uty%D{(Z$!Pg0`UgghpW+CU41S!JtbGX!kPSnA1?8))mcEBy$;&3sfl*3ybWJA
z3tp~!H7`=FwHU0YtrdV*YTE$|>WTn=Va!k+@|PL@$#BiH$OvVKZDpSW2J6vlvL3z8
zmm_c6jQA46afS~!Ut5sQavCGT7*Vpi({Pn;UVD5-2iw-|j&xA}KRY-9S^U@TJpkWm
z=iUJOiFXaJvC^tg$0jSj0-ME1Kde~+*1eijfKR?p6VWt6cN**@rZG4c<Q!2D7Q&b$
zA}4aXoX7PiNoNuS0lMATnn~25$;J;bBKv^R_&yvcT4EPSw+LIJQkuNAUr`>~8c=LQ
z%br97A#}E-ix5>PW7`W@x?x8(pv|JEQx1>f-`yHi?7v!HQS9T}UrQKw?Q_Q<E}dxG
z1@h?MMF(wLH@FyHSxh|)|F|A$Zm0(xsc?kaoXuF1Fmb)p2XwisAMlI|$Hsk^S6S`Z
zfvaEdSfel{Ze)(SD->pmF)dweG<D&y_^u0wg|-_RTQ^RK-ORqg>>n5!J;*nEK7lyw
z4u;HL57zQX&kd|)yW9YHR>s<eGqwBn0Q0d=R2Y3PGQ+)f3iC5#JbgF}M*DC`sQYn9
iEbT|8i{WX8cNrRYBHzmJt(~`&T=x6l#`@<641WVh^AW%R

diff --git a/src/wasm/Hacl_HMAC_Blake2b_256.wasm b/src/wasm/Hacl_HMAC_Blake2b_256.wasm
index d95e2b0f601c17e131827ee876844b4fb3d2cfe5..bbc821ef32b6d65835c88ca021b01effd2860dd8 100644
GIT binary patch
delta 257
zcmcc2{fv8pm|Y!HeSHFBJtH21xxOAG&jzF*ELJFkY2CyS=ZSx185t*bOL8RUWTxlE
zo0&{Jr$2GJ7$eK%UPd`a)`{KnlhYUlCaW{bGqO)yr|8Tf>5-V66Yr5&oDuJolbD@q
zloTJFnVVu{YNn1;CO!kCygaicBR)H|a`J7)35@KM3z<b3nI<<eU6WMcb3DMHz^K6F
j$l%Bzz{xG8z@;dnAR+)$)6TpLu0DcgA0z8zan?NmfD=tc

delta 215
zcmaFHeVKcLm`FWieFAfRJ&0li6Ln1W6XnE2*nnaXVOA)EY4*er=gIDj!Ysx{42+Yz
z7!@a8*PA$9mXT$09HaJRD@FsLB=5v?oQ&*~Wf<iQI7B@XlXK!d5{on9opKVhQ;m}1
zjZDoXz@kV3@ktQL$?q5^FtSdrXA%V(FqP?=q~iew1wI8P1x7~(M+N~-ZYc#WMG*xN
W0hpRu%)8*~vsv~ra!l4@-2(th13Y>F

diff --git a/src/wasm/Hacl_HMAC_Blake2s_128.wasm b/src/wasm/Hacl_HMAC_Blake2s_128.wasm
index 5b04e5663628bd8c6c5e734fe1c2651170be3455..dde8629ec17f3175fd1850f94720376d4aa216a2 100644
GIT binary patch
delta 260
zcmcb}{e*jhm|Y!HeSHFBJtH21xxOAG&jzF*ELJFkY2CyS=gH2DvJ=-zP3B?bWMrBw
z#;7pylN47{YDs=hs<9CR%j85xIY!or>y#MTCO0r@GqO*-sOZcg>5-V66Yr5&oDuJo
zlbD@qR2(0inVVv0WTB2zCO!kCygaicBR)H|a`J7)35@KM9auzxwl*<cl~CYwR8U}4
lU~*(|WDwxwmQvtS6j2ZnfGKHZ-UU}4#<Gu*b+Rby9su-yOJo25

delta 240
zcmaFDeUW>Dm|Z<%eF7thV6Lx+v%vz4bxie8IWV6Mr~smd70O_mJu$?2vOA+Ni?I;{
z<78__B}S&nU5tv1%s{r}#B-dCEE8`iFtSeU)?{RxEW;?r$UZraQQLq+)FUxDC*C8m
zI3wOECowzKs5sux$U*`viX;%91d*Klj&TAb$K-k@QAU=@Q<<(xI4UUcDKIH8Ix;vi
f2yk*sDR3!@D2NEal+0k>1y`QQvJYsh2J0RG(zZdS

diff --git a/src/wasm/Hacl_HMAC_DRBG.wasm b/src/wasm/Hacl_HMAC_DRBG.wasm
index 990b72e231ac84134f8464954de2ceab58ef4aea..c1cb2fd3971da9dc9404845d34abd70b20e02953 100644
GIT binary patch
delta 25
hcmdmejB(2`#tE|-n>Nl#Vq_GX9Ka;HS(-^95derW2)+OS

delta 37
tcmdmTjB)od#tE|-J2%crVic71NKDR&_waRej?YO=PfV`d%)z9S2ml-64W9r2

diff --git a/src/wasm/Hacl_HPKE_Curve51_CP32_SHA256.wasm b/src/wasm/Hacl_HPKE_Curve51_CP32_SHA256.wasm
index 7f40d696b3c8016523a7564df64dc38b9bb78558..37798d12fa6459f580f26a327b62e117016dfd1c 100644
GIT binary patch
delta 495
zcmdnBjPb-W#tCZ}4{uy+$W$-ok(itl@966267QUmn4FPlWDt;_Q)y^yV5)>$Dn2zY
zxu~+Bgn^Gt{VAys{gdOFUon2z?8V~h#(T1(p@D&osZP3{u}*mN9Iy4v^_+_43Y-dT
zj*K}#l%;5)V6I@Hz^=fiz^cHaAnnNDC;*nwU|>*C72s2}L=odqv{JCrWMELVR<L%e
z4`iu#WMl=h7;6-5(A3!~*eckVGcY(ZDB3wPa4Xs?*t0q^aw{r003jbxlt+<Qfk%PY
zjDdms4vH%u`K@KHXIC@>Im%HW8|F}R1r7x>un5?tQYbD}Q7{+a2l~ark-?3nUIAH>
v1Ly@ykduKfhdbE{<Ya3!bv6n%3f2%OOW1<kY^Pucb~DWFK&MZZ3zh)@*93C2

delta 479
zcmX@HjB(d8#tCZ}k8WIR$W$-kk(itl@0^jCoRMf`5Rji!X=rR<8gFc*ic2CsF*PwI
zJ~c17sIs7hfsb4RQc^(%Opa!L!}wsc2aBs4@4=3S1_m~!I_Y}GI>F7eyw)?<OFJ?+
z3gkF4=45FwFes=huqd!9uqm)BZ~`G4P#7d5z^7=gfUJ;1(L%vOlYv3eQo+)xK9Hr}
zk&zY1VyscLLQ`k0V69+f&cNWvplIXBz^!PjV9V;r$gOCnV5h(b6qMjm<W=BN;5B1l
z;J$+5x)*+Hnd_xcT&DtYoufdu0te8E>_7(!WW(IZuV|)V=E&g2Qm=rliv#F%bC6Sk
mE`~eR0_0RnG<8-ARtlC7r&@!YYNKEy0Cp>qn<wi8%K!kLBXC;)

diff --git a/src/wasm/Hacl_HPKE_Curve51_CP32_SHA512.wasm b/src/wasm/Hacl_HPKE_Curve51_CP32_SHA512.wasm
index 1abe96898d71c8c29bc422d6796b65e44679dbec..2b7c24967ae0945a078cddc58b2499b275d0e152 100644
GIT binary patch
delta 495
zcmdnBobkkR#tHiwCu}@W$5b!nk(itl@966267QUmn4FPlWDt;_Q)y^yV5)>$Dn2zY
zxu~+Bgh7Bz{VAys{gd}Izha!Qc@;~d8}I3kh6V;UraI|*#ya8665bn`>p2z86*v{x
z92s+fC`-{o!Cb*Yfn9-3fmMM;LE4eQQ2;EV!N8!PDj=X}i6X|KXr*AK$-tm!tzhj`
zAIMVg$jAz0G1e&BpsBM}uvM@zXJBw-P_%Po;8wI(uxE8-<W^L0075>XD32nq0*?Z(
z83P0NJrq|a_^)TKXIC@>Im%HW8|F}R1r7x>un5?tQYbD}Q7{(}1p39ok-?3nUIAH>
v1Ly@ykduKfhdbE{<Ya3!bv6n%3f2%OOW1<kY^Pucb~DWFK&MZh7c2t+6MJ&|

delta 479
zcmX@HoN?E3#tHiwCv7}X$5b!jk(itl@0^jCoRMf`5Rji!X=rR<8gFc*ic2CsF*PwI
zJ~c17sIs7hL4aHXQc^(%Oy14>hOuGu3YJ7S-oqUY4Ge5db<*{Wb%LA4yf-q}OFJ?+
z3gkF4=45FwFes=huqd!9uqm)BZ~`G4P#7d5AfRZjfUJ;1(L%vOlYv3eQo+)xK9Hr}
zk&zY1VyscLLQ`k0V69+f&cNWvplIXBz^!PjV9V;r$gOCnV5h(b6qMjm<W=BN;5B1l
z;J$|9x-|dw%=J<zu2X@y&QTy+fdlA7cAx_VvSDr%R5VjCb7XL1saHVO#Q}7>ImoF%
m7sH)u0dlG(nmQ{5D+No4Q>{TxwNbDU0J{~*&6C#z%K!k{QE;OG

diff --git a/src/wasm/Hacl_Hash_Blake2.wasm b/src/wasm/Hacl_Hash_Blake2.wasm
deleted file mode 100644
index 5064aa4a914bc984b94700310b5b79b482c220b4..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 20644
zcmdU1cYIvMwZ5}jSu5?zT1&Dm%k7M-j0@NXn+|KLDG7v503q8lwj>w4vT3p;8_Sp)
z2))AtLYhekgqK1RhZM*oBq0UzNFap}Adixe5J+h6`)2OWo!z~%Aulh#_x|wgX>-n*
zKDzTk(3W4Jf*?>YZRqb0<aE`mdQl|$`c7AUYCA<mUtf&A5>g`%)s7!x6s!?ZG0L!7
zWMh(`VK3{(dF?gvSi@4zIC2u}HSwEGXmMTD;caW$R<w6jtY}}62(sxD&>_dHUA=nM
zn(juAZe7&gy{fBy-mXF5n~aV#X<gm6xU+pppnS_BXKh`}&sf<CX9?e=-gma5t*dL*
z;-JDeDDq(wnmZ>*`X*W5NB5ex)j_4Nj`1*iUNFQr8S8skysUk3XX~Q1OP98<Y0WP`
zw>_xxZ5w=Bbl|z|YgPrR=0n>SceO5DvAU~uL04O6`<&JTR;|o;x2^2XPaj;>+S+<X
zdpCldv7+tRwnbg-LG^YV?X*Kj%hwq%tqX#he~O=Df^=c7)GTT1Zfix0)^u0&1(gRM
z1FF%Kt;a1~**$yC?yYO*&cXOXTXA(irdeaAc4%F8Sbam|@DU@MMm3KfGj`ngA)BdE
zeL)VcpFf|oag1q}?X^X%@Uh5r;G)*$E0=c%wG#$YuU(Cl+FLu@&k2T3+JVuEwO!rI
zgSv@3Fzaf|cL&*VgIO&_M)_qy!^lAlg2q9shEsdE-`ITA*t~7*NWZZ~YOk?Peq-Z|
zY-30Hjg2!ijcxWbF0u+6JJxTkZxD<dv})Mc@zr+DU<p+uYHq$}1Lnlj_hQJ8OY~Ov
zRrFT$C3<W7RPVIDOm7Ci3H(dqALab@>A`i$Ue#y+rT&RN7?1yQK}!(m>ReXWNKLk?
z+3>k+PIcu{U54q%CUSwU(kZEu)I1ocCA}b+9|Tg9Q=Ph|Vt)|9;mQ4twWPtNx;i91
zE%1zrJY-a*O2doJKwOLZwYluVLj8;$DoIEX9XY734qYd8@TIeo)x%_%u9u-k-68c%
zqBf^GbZw0PTv9hkgKJW9drfMTM%$zYHmLzAHF!;G1ji;dhE1|YF(IW<50^x;cR^p}
zf*`F&EC}>S87>vMAeS|5u?}+Ef{AKjvJbzx;NWDRR8-n-SGmOC-QFZk#hy@o>y{!>
zYNCR$bL-|F_Dr#P%9zg&QpkIhH0fq(PQRb9qh)k)4EUL-I=60Nh|%mddK@F8^;j93
z*5lCLamDRTK{R51kj2l)`9YH?JzfGWm~t8&t-X#GaH`Yv^>{~yolcF0u{RULc@*R_
zc5FNJ1n4Ki>BL;pO{fWal1zXEBM4VWqCgJl$uh}uGMTNJ$f-0jR~`bQDeyWamvLUF
z<Z4!9Q1w(aqeWWWG~4dVnI_ZhG;3jFTHvz9YyC8EoMzL)7F(m3bYz;|Np{lHWv5&?
zt2^`zFsVW}%mm~5WLECgcOH0seYRTfEIZ58T+oqAc6D&o@8UGGWi~X*Y34Z1T$u|^
z!fAGOn%!hKXeyj$p3}^i`OqYtW_PFAL-v5C(rNZ|n!RK%Xofh=-cGZR>;p}e)9mXs
z`^kRLq?~4dr^!hUnrf$6;4}xw0npSq&4EsHkQ@X}+G!4UnnUCeXfjT7sM9Qzh0xSG
z&0$V+xEv16P^US<X^xa5p{aA44@pLL4QHqRFqqv7%u!(WE-**Sbg;XGET`*q`7jnA
zevabenl49kab;v)p~gqR>{(!r0kdy`IaX$Xo#XkMAs?Zy8FCDL&5&cQuLBD;j)VQ7
z1?G4#M--S7WG2}CJzq2BIQp6?$J5tLIl=lmxKQIn*dJD4P6G3x0&}v=0=vNTHA_yU
zuUT>uea(`S>1!wbQGe>6;!pik{i%N%7EfzV>C<xw-IlA+izJX%%>KeUSq%Pkk6!}&
zcC5;FZ~IuRm&#&D!tG;;BYX52vea^N2FIhF+edrMdWC2iye=#9x*T4Y6}*1bkv+OY
zmRn9b=ye&rE{pMs+!uMdcY^oZhQnL0K-3jp)K0xpIw1+$(&fkjy-HSEPF6AM3PxQK
z7qwHbhWJc)KGXBOTCb7SkXX+;FZrC{aALW6lkS!^RwvzbekPrt8RtBZHm^x*OPaV2
zRn9`-v%J7-_1Ur(lCVkZ9NDAKk+Ur)=P>G7jCxjlRDTkji>Q7U=b=guqV{-E&(-VY
zTu8#G=Q(mf_sV+9NiU=JFltX+)N^$o#Qh4c`<3UrPj67tM_JgJ8<lKSxh8Zcep=X{
z=PQAYwUF}_<A6aRl;x)v+T%}*O$q|P!2_(cnR#sX^Vp>H1xjEIY1qyIkLl4{l)%7h
zWsAuI46;C3o&`h~GRB1^F)lJOAPr-jLJLdx=!=y=Zv}F(2>}KnP?m>4yZjY#2^;FS
z{8CoBj7ePPCvl0=A5#KrNW&y9^_T&Dxe^#ytz2%h0D~-0#$*A1S5WbBM*6rP=?bN<
zRC0x))JD=*DY;7JvKU<a?83plS_zD-g<Nf-fk8BsG0}=A>osh%-wD^U(sj(_IzN+Z
zl)hdGtRW5Ce67dy=o^&4z-r|NlLZ)LfigA=e@5NNEZV%4d=o3(%q(v9v$#>|Ta>^W
z(lCpgJZ3=OsssjBE4P{~z#t2hF<HRhZB*RONVofuZd3Z-l-#B$4SW6$C3mRYe&~7p
zDE9oFN?>Fy<W3U}45Fcoi55h+ue;FT==${ua_;_hx01V*{-lyms%b$=f65YB{b@@y
z>Cadq(4QqCpE7|#d>TZGlFxw1lK3o$Ci#TYTa|2ux2(R$5>5JZmI(CcEs@gql8}3h
zHxQpQ-avfbcmr{-@piY;Ur_P|cx%%4St8K)TOy^uXo;+TfP~y<yn(picmwf8;|;_E
z^oDNwcl4P468@6WUsgDBzrv{iS#;=rl@b_4k_XwnU*nj4%^$O`D*bgOFo!gp9S?d;
zkN$=d7+9@*!=wfV<At((ydc`f7~4u>eAC2$G>ma7EiB!mA5sFn705#-1Q>)sSsnrn
z`-<PNZ<RIv+pP8-rtuv=jc+OaT_rGwG)&{$9y6dHRssX7m4{6pV2}sOm^|R`5h}jN
zNZ<1#J)-n~D0xItY9r}Kl{~6)Jg&_T7Q*+Hz{pz2_f0e~h=wvIS|AweXgD8ZtBd;K
z2dwr(=JG>7m&cU;krJ3g8n*fe9@C>AR{{g8mB&pUV2}sO*gT4+)sLA+QD#43wVyJN
zpZa<HSm~cBfjOjM9zXGz0sVv$7+9@5Ve$ZjJW$5u0e??Y@pDG{xgY6CrT<gOlZw)?
z_n%Vols(<?V@~&9D1ni+kYAW+U=R&uOtg~a^~<v5^)#zJ!(5*6bNQvxzfuBoNW)e?
z?J+(2*Ggbuweo9|2N>jmvOEune#02QDT(o069dvP#%Z*$bdP>k3G`MV&zcZm5CUa+
zh|+2GJ2tgwZ+VW@e$O<1@2BxQrJq*<b4bH9p7WRi{elu0SgpKZ@&JQ8P{!l|e=kz;
z2S)mXAL&J<|4Ye>ic%X%zog_Pd&1+#obWFzfswV4mrXP<h=wvITFE4Sg{>}HUazv+
zADPP^{ajvA`cFz=4r$oxS3RajzorBRRx7WWJis6il(Bhy3GUq=Pk&|}Ma%0itoAzd
zc-_zA&r1JQ3Ctl4^Z1L$4CptMz`$zd4U-2L<bg6K5BPhNioY?^-~32#D*bmQZz@W|
z-hWHUTlSR4k2&SvRsthyA#a;#U=R&uOf>J5e+Mm&PWgAqxl{f<CGRNRpOF3pPtpyR
z2=qovr1bfg$m&fbWJAK-+7q%7M2eF0L1am60?{PzDm{>p0eH*m&6a4=7g!?DTP%^%
z7m|?8#v6zWj5iQlj5iP$8gK6@eNjR#g13~u*b-TNi6xr!rIx_un1ozxyn(pHcmr{%
z@dn~D<82DNYf4;i;VPXF3`yZH?8_7Q)OQ7nNc3@X?o-v3<XkUam4MT$6ByU46W*Uh
zA4}+K5(3YVhBF909W$V>O$ZFEK(00EgF%ug%f}R=>lov@k{H*U7?6fBkeg!$^bHAt
zffdLNCI%S9Kp7k3M#i|&k8ycI-;@ya38Y~R^oC;w^vwx@ffdNjCI%S9Kp7k37RI=x
zB*v{K2Bcw(s}eM^%z(ZvAuzB4xy{4?gBU2|Vn}t7IQ_Sc{r$nPL@!?>@RB>ozl!i>
zp-KY09C*WD?T>zqKlthJ6IqSUNCw|P@m1e`xvasHz);TIIw!zm02jtEC#aWtuDb>&
zz$g7_f#~5*FhWL53tIF@C%^~wX~8afloQ~S__SbmJz8p{maDTy>bN#*WEfXwjWlpw
zre!#FX@1L2;b#<eX&FtO+L}vk-HeanwF~<DH>G5ZB(@F=4B*RnO2)EQ%6d#$k16Z1
z+Ip<E9;>a#YU42%NELd&QV)?z88Q#=)Q_<D6N%n5J|487k1#m+$!+)L053baezny!
z$$YSv8l8rO{p>zm_hSz<dprG=z4_GQoh8^4Ex=Ok)EP`Vyxq7rpIVCX<+^re;29N7
zoA6tGErzlJFFHf<oRiSQ;AfcUCkqbW|LLmEk)cm&Fy7@eRFd`)CBVL;FcL$vnMxtU
zK?0L*mZ4~DE}2c0PUSb)nM3Sj93R6f!cN6|mQEwLv{bqe{%I@`xar!e(en5%Pw8rm
za5aWWqrW7>4zINxZrbfXiy$ZOQK$z~d&f>O>7~N2R^o*5iY-z_7O5U|EBfAdF;FU4
zU!vbMz`v34pp+86za@n;$4nHy&DD#N1bTXy)Mqoz&|x1*qxYh&umcYhc4{O}--3to
zAek@^qZ!xln3Q-J+}@;&TyXl@Nr|wXcEG&JlwmfQNttj0S)-yP!bzFIaB<2cJM9wa
z;C_QiT8l}V{KqC~lx3m)JDf7u!6Yz?wBF8{dc0p24>ukqqld&E-S|G{%<%VNgPk#q
zj2BA|^a$*dJD!ewkhBRm*=kMLOLtriyKGpk*;Z@Tqu)ohG1yIQv@y2Y7(MoVR2wJb
z!fNAewQ+cTEP0X@wASlaE~8?Ll%QeG0?P=8iy&zy3I-rRI+0+#jQ80AZy62OVpfe3
zky@^r+QRl!vP9k`d<%Phwy@XZwCq<a@lwy<)m$mq`na$TKNGLW<~cqC4WGm9bi+a&
z>=hqVTr$|I_?kX1z#baz%31d)yq(H;hnE^w9wup9xgIa`cy3Q?bPXDAUhmB#<M1Lr
zsWH{qda1xxGCW&{xe&4=B$>^Y?$D2hT~OtAh#gNbc1w8N8K44AX>1h<ZVi1dS~5Gd
z*_<5q@JJd(Mk=$zD(!t>@W=6JZF8H?*-7c`^VZi;`XU%2@OQmCi3swBv?{j^dP%t*
zZyV0?l4CeJ{dG0_hsQAP&z(9|u|J6D7{=*icD(9v$FoLAAo39Ic-2IEVE)wPq_E|o
zhtM3{Bq_jMQRA4<b%;>sUBuJi954e%967$KbIH_k@5eD#77d~-{A4{p8E}!Gp@u8?
z!3%na&c^sb#uopfFw+9v2%ng0?h?`fF1$R{J2LDEYPemN<%S+WkMn9!a?iSa3kIq_
z(K~d|8!m6Y6~OnHwWe!;1Mp9-avXp+ZE$dLGr|Rhd6}Dr*AO0D=vH$Fj*O6e<RLn^
z5Ed)hO`q+C#o=$m4rSP(=s%3H>pxx$7?xlghP6g<Ve4=%)?zGbqsdYa#{Es#0EQPr
zx<juQF7d-<IB)eMoM5Dk<gLER2}a2%-s+p30N@ke>c=<%Febd!k8^_YGM=}3aRM!x
zxB3ZAFi|G*RzFD^h(ZbQ)5J5bL7I7UZ;&y(xi`u<>Ka9;Ym^DpHOeIFlJ-_VSw`4f
z{bU>{0|Ntk3U<FKtToJf9A-TZvmWcM$9n6r-g>OJ9?^Z+{VKT&X6u`|1x%HxZVY3#
zfN113z$SJJn94z%ia`XV!wq5!I9_^M!ijH<;s&ur1C640l4;QvFdd^h!yna|s5lD|
zW_e3@Cvefyong4b(w)VnJ1Zs{L_5RJ&Yqv?;37XW4Oj59GyUuw;|Ce<BD?6>qVybW
z0dwJVuGjiG;KD6nwj;xyphm;Ed=*U#w}57BL)fIUDW1rNDBZo^A+{5{_mFVWSLU+#
z%WMcoE?juo3SA{tc9jib#34v=h?im&IIgm)aFtmjq*bAlJQH!KhKrksyOiKG_EhOx
z?y(tMnJC-|kU|-94<G4xOqTr>XD$ppwg*M-0U0D|RE>-fKf?Znscj#*XFn10uLEp2
z@Ku7`V~BeC|3JvS!J1^Q8}v3Pa*y#E!zS$rx!0HLLx%&ohgKWp-raP~%OIXdK;z0H
zBj#a;0F-TBkij&>6&SZ(>{%FRfG6ET90d+jk-?iB8D>EZHo2IN4L^mU7VpG*44jVf
z)(kOrqbV`?EhJGO^T6GWwVaG&%44{K#>9kxXgs`*&(*lUY(X0+o1ht5wA+Wwlpd_#
zC&+}HnI)Qy(Qv7~)=vP(rcVf4Y>i^lkqH|6Gd5=+_IP5j<&(jfvxLkPFy<^FGd0}Q
ztGNq`cu=-!P6K2dG!c|-y3@>%8PFsg@M@;h%#vBqR5;DfPJ`VBnh44^+iB*=9B3ja
z+gzvFRd$7Di1W6a)6A23&_qzS`A)OD><&!?W!uAP_LM!LiJ)wIInCa(H#8BHZ6BxE
zSN4S_g0k)BH2cf`&_qzSoYO3j1<*uLwga5zKsgYa2+DSl(;O@ZLlZ&S4gq>=W;i?b
zp<regn1x_=D=>!vcA3G!gCPy)$0RwFi))fB<l>qnhjDRbcwShf;^AQC7MLTz%r7uU
z%4D$9JztaMaQd1oN6^<~Inw&tr%;1<yIg@G-tM3RbCgU0yNBm%ihPK^rpSlsYl<9Y
zeeGAMaWw1?C@{p^9a3P9k*Q$!@_bE|qv>m^e1yKH$}#jcQ6KA1{p0+pf4o2SPsj!O
z#GKM6<r4bjTm=wtft(2Mg++4;_>(;TRM?+}Re74XeVn3O<rGTn_Hn8sJM`(&YB@Qb
z+sA3#K8T(x-TEPFgV(kquZ!Rn66^I?M|SANvdD6>m|olHwJpXga{s88`x5Ye+uBiO
zDWWd*qAmgYZV4qeYP%!zdYJ&2M};h7)TNBNG%o5Ay&U2Wc*a_F+u?HE2?U;7y<O6k
z%dTV5y+*G9e9mZO1zmU0bw`}*Ku-1=w6dgut59V%!mjqhuGD7&f(MCh&?-lE09tpZ
z<zx+`u4Yt%@Ja^<QT!>82k&RmjVfyqb*&dQuh&VQ5*xMKk$HWVth1b)#i(l;b!}YK
zygnP^bKv<L&-2;(T;T5@F`o5#a$YX84n-rnu?OHeqmdrEK8LQ)iE~{%57w775Kj!c
z4`KVfu<Lcd0s;>r+oE1acIXXCpfoDkpcuK2k#UnM>q>|=D!2yXE}9wZ_4)KonRR|#
zM0e;-3Q)XI0N5Pe!xH{cmIpvn{3$=6An@n>W>&g@DO}*EFrf4nC9tN{Cb8LL@&LvG
ziWh1DoMRR+MHVPyvVgyfsJNJsF7_i`q%<ILK;Kbn$M8}mfY7DZv24OHUS?2pRu6a_
zV}U^|lrgc2XCn}G#a(bYD_y}nfS!vw{bLHiH{$OowJio7&oLcD)WN_CfSh9%V2}mM
z*ev`t0{C5VT355uHO%4~KZ~oBzLrQmBehuor$-aR<N=El7+3*dbIbw^vOpP=1^iu4
z#SM&fgCFU7rEfIYJ4$W8-=qWpx~g?7N1@+uHkdi92RM$gz#taNm{{TC8=Mhs7rwoc
zyOo@~T-`>joxWYk?W!e6YJ-Gp!Jp@MSV@NPII04D7YRV+7#PGIAd)2R1d$?f7l;h-
zdioOzXf-;y+Y%Y#;3x@*gCmh74i08_8*d;!X}p2>l<@}Q)5hCvia@!~ph|{7IT8VJ
za3qq%!I4N22M4py8gC%B8gC%(G2TFYj^5BspGS}Bdx=x~0&!~haUwt#ov8Oy0)t5M
zMRxB49I*%d5xZXj^aeznk=nTd*c?p^(*ZObP%5DS$TxNyv=}dx<>Lj>ml@;BB{9BY
zVo+*h9B;uFyh3&8uPQ)7LIEIi3;|1oKv^CF4ZDwi3YUim%NlPGbYEv0U-#2^P!V(o
zb4qO*U-OteU~vHAg<8Pom<LRe2g;Z{;P0DMJj6&3`H{Y<^tTN5j#3jze_ILQb2y7x
zVi|nLAm*$d&^X2dgIFkIVg&#&dTstL+g#KO53|}M%mmoEXaoAL0_=_GJ4$VvANH6I
z;^|;uwZP3W4=~6BWo#Zrlj>3CQIy&DS?w|A@tB{-qe}mPNIfI9c>t$J6T{>IiW3-E
z0YG!i0}S#&8IuS6{fLUk8R>C9(vOt>u|eKZYWw~tN&ue20gjR<_)iU9&gub-V=OR;
zg)$~q$<hLtu4HLF!D>%36CmfJzW<p5=#AJrN^P5;@R$yw>0n^BK+Q1^FvtUCc^(iw
z#TZYO#Q24YL8*;#g0V1khyJAk2rm=>F2@kCL<p4SA<(d*N%b_FTC}e`!)m``8o%<>
zcv=x~2Xjho8qau49*{VI@IoyhbIb##$OC0e9`N^DDxPJeXZ=XORr+@ZcSos-q@Pm)
z=p0UVlswsgZ_sj94_F*yfk7;kF|kS}@bheQQ7^o}YA-Sq;O3&?eO>|bM&uo(w#_ei
zOb4-aFtA!+=9mW<<bg6ak9+B;u&n`kSCZMwto91?c*W1-C8b{_O3z4b9xr=L9)LK3
z0qfweia(k>z#tElF?qn>pQw0^kzVs7{YmLR8`K@8w(tL<1i(3*?kIV>zizN{Ru51d
z+YAP=P{zdaPWQi}z0sGGH^{lu{Y_%*0C5BGjVI{eEfMIqERoc2TOy_3ApwXS1A}-A
zM3ThYAW|gW0g(YtPXht>t~qqyvqXmYH%bEH-$*2he}mb3#v6$K1igXS03t<VBZv$z
zdj!axpI{XL<Vd85e<P70{*44K#3TTaV{RY@j5iRQjW-Y%7;od*UE||=3)g3dPecC`
zJ{J%f=k+4sab7P1Bj@$vr3pB_jF_v-e9YCB1YvLROsSne1}hgbc@4-H46GLTFQyL$
zNun$tQ;4o$j4MiF04s(Vl-d}`%`tfmv>FVoK&~_~z#s<7*cewa1`v1Adb}_Jz#I^R
zMrvcAH)vv*JW+TsumT|N*bXp=figD6wTy9XNsQ}E3`%Vbzz-dh*9K%43V^s{3}_Jp
zWn7HXC*z-o2&qxw7xF5>H-CSCtNrn<@rSp7K;wIHhV!j}KuhzHjdQbrKuhzZHs@#o
zftCgork-=PfIu@Z;VpR0Dj?ACRar)~1dR@Xmd3~9rWQPJ6%c4?{zus@!5E7`W5a<!
z<9Y!CjjII+G_DmO(5M3fjXEIEr~?9xI&9+I#GmB~__KUIZ{cUG0{9uL0Di_Qcmrb<
zz|W`y{ERxl&!_|Z47ywy{OksHE>Jracor?Zf>h_W1zYg|y3PEr0o>=~ZRIc7(Fcov
z6uT37h@L>D0;5xl$A(1s&K!M(ufvm0_#nhbW=n<-LS)T5bD7T~KpC)r*%6fzpldlE
zU;rqqMPuUtWpD9;2;Gx3ug2yP1|3rlCaVy<{C2=e*VZ0%1wmDyd2aF9q*efsK(q{_
zeIQWP)f}FRL~jk^7ozWYSiILo%=`Z@kSe^E1?WACKvMuuna*+}Qv$q*!_sUEES6?l
zFc_8weOGJxF^3cQ|2mdtR5NF(fz}X9(_FeSKF7(4$-%W7O^&UAEnu=0V`G}iu`$3x
zW=A`045qAE5nCxOL%G~uhH{Z2kV^<UF-h1*$_SzIzkqSUO1BHE2;+joXHblby+iUL
zofsE;OmUO^pct3P54WGl4>J0fVq7E+;_`<JFdy)mJtI6QV<9YPLG+Ydx>1;8h}&o!
zgaxfNhiGJkjWF^M?caEIEQ7EN78Wnc_Stbz2n)mV)tO=6H-rV1u#toi7BUXPLZ$#=
zN#p<SGMv|f0)!=vN8*vZ78D>XY5cEMM)6uufUu--mXGGOpa5Y><1`-2Ye50Rk_H}e
zJg)@>2uoUP(YzKEAS`J<k@8w#AS`SbAS^tq0b$`;4G0U*aX?t81B8V-Kv<{)goU~!
zuLS_G*lWRL33x3q_!U+GeuY(lUttyCS6Bu373zRrp$_;J>VRLN&d0BeY0gOze5HJG
zM}w~a`REn5xQoy$9JayGE777PdIgr@Z26$*mB<g#E0LdnHG0J??*BP>CD~=cD|UTZ
iTnaAlrQj8`Q>wxqvqo6f74HkYlD7YEv|wV84*nY~1%*QZ

diff --git a/src/wasm/Hacl_Hash_Blake2b.wasm b/src/wasm/Hacl_Hash_Blake2b.wasm
new file mode 100644
index 0000000000000000000000000000000000000000..8882f5e8e067fb9b2b6200202086898da5be147b
GIT binary patch
literal 15858
zcmb_j2Yi&rmH%e7l2*GbvFa<)-jfVSfCL5t7$=66F)_v^4%oytw!FX^w5k@89fOc$
zi~z@WVkd5Ki*xBk>B;3%F1}Rf(k|taT-qg<a!D@jQqIYx{@={^&Caeq2$$dee(>$Q
zGVi^a_vTHNmpwG7JkL{)+&(?+HQ%Tv)g(TVsi_;)lsdzwY-*~QUJist*+PZJ2v?yL
zLM-U<WkPwN*;b64JP!M6DtBZ@vxB*TvccS7#7iZ1!~C7&BO}A3V+~HZcW-WNcp$fa
zo#(kM^TSwrN3#3+bNfBzaux~84)h%u>V>d~%TnhG8_W(24Da*GT!tbU#-O!pyr|2P
za%GH-W=FhommDJD-1T0C%d*gwu<u}QUw`l3@tbbSjrJbuJDl?>UEX?^7ae#wH#+Ra
zS})G-8|d9WI5N=NGm!1it?9jRc<9hrc4+KSn<ibHJ#^3~9qR4vJ&+rN>;r?@UD>?@
zIj`yr_4#!5`)<*ddwaZi;L-U0>{zxJdPm2~ro8fPJ5g%T-`=aX4~?C>W@GR8+BNV3
zsAcy|BN&p2WOdEFRBc^-Lt|6LNkOS8FEgzYmA{r?u3A*8esAyi2-M_y2ge7-`n>9e
zr)3z(9vbs%(sSv)qidVzl16ifa${ayNF}4ys}CXV?>mr#s)nYy@@^W<<-Eqla|s8<
zA-DF89qjGT-R3n_nVy5Y%Odf6e<eNh&9~!wRb;Yis%)}sDl%C;r6xP4l9NgNM({t1
z|ALoaw|sh9bW%;3|FQo^8KO`AXS@#2<El)G;}lO-wrV(&%BX=%Y(N9~R3zhZCC8|e
zV(Y=2phS<i!Sg7dQT-e*+k%3(Bf3R%Qvy(IAWw--&tWQ(kW}R=0V$jWx*GMXGpX%C
z{Up~=6c}iS4Qe3I^JpGqaf(u0OSN1_HJY5KIx(U;qw-uGB0m%5daAcgik_)S4b)(o
zR4+}che`ELlNta?lN$Uc8Kw}G)WD4tiB9%RmG^iFZtC&4nHs4q<7HC1Ek;1*DX(2^
zk51t?<86yhQCYd^c9n^ozT4B3E|!Gqv!{wsv33Py|Llng>6v2om}cML#b9>}rMZ<_
z6KA7#KFu%I0XglefA*B9m@mC15*N^XUPucQya?@GRNUSeP)!@W6n>gFcxh6+m^>zg
z9HXPz>1YC^`gLC~wne|wg~DKVBGHc{FOxLB&2t;*?GW0YiQ0f_<8!DD7<dq(U_?-y
z;U#pAfoO@erd>j*Ju_DYP)i|oX(nl<F3rS8;8b3QW^_=84YM<joKEUAVb&py>43-%
zr}dqHB+NSf78|AzbEuP-({f%x%QJpd=XoW7SkMh!0BoOhXMX<2_x$E)s)|?9Dq5ED
z@|oyBUPk?DOF5U$1w~oP8cSJAYe9)v$~sGV7Cj4;GD}%+DH~`5C{at<XesB>d7zYA
z$|g%WpUwxR!cv}XDbJzjfKq8G7g)+>+6+p}Qnpx1hBBa3SxS$kTu2v!61SACmU0nY
z1WLkEwpq$^>A9dJE#+cM*-qO*skW3$EM*7n0HwxKF13`)=rT~|S<2;<q;-CD@)ZC!
z2Eg+GJUalcq!j>H`=EsD3c3P=4?oY7!L@>}l);sx^+Ap21K1P*I{{n}0J~@<z%`Dn
zmGpd(wUTy<td+FO$l4mzxC;Ci2f)<;E)9U))CF*hBdd$95?NhzwaDtC-A2~7pvDWp
ze@Osb1K{!ixR$yB_BgV-=>;OIo30UA-E^(UTF%$Gp})rs{p;P(zX5}%H>3E*OoX$U
zGTuub^&<L%ak3BajSjpY{5g!uoRdEG@lCW37(adNx5WuQKsOnP4v0T;l0I@F;}xib
zkb1C4Y9FK?45VIXixZruJ_Au+q#hKh2ScR7_Pvhn{ea!JVe;ldXdQI4_VW<+1LL=4
zz!qnCn1&2Q!=iOiv<`-8?dK7oZ-(TX9myj+N+ZA+$$W?oWeAg#d`}$bF&Z^A8WZ6+
zi}0JngnN{AnlxV0#9L71Rw%yJQ9RBsq;X*UCf#C-6MP%J&_HyXXuVam-Wsme4T8ha
z>RNFGRVJWy!qIw|kJ4dae62@pafT=9sDWrwv`&cDi7>5)c?#%h1<}*Wkv+w?E1D9V
z-<ii09aEV!x)VPg(w}!I0*~RKJCx`F07dxDoxRW=H!zMXAlwGeNTm~E#|hVt<BCrz
z0ynUJJ7*kVf=?*|gQ3zXZ3O^Wf$v-^fVxw3+*zXIMOp{2zK%V@VagMHmm<&&g6`5P
z06+!4b5)>S?ufWs8tS(E9;x(VG2+Fp5qB$oi6U?V>l<;81I+Nfiojr~bg#Ao0Ia|_
z#0to}PY5p+O)qsd-KY3vitbZ_HJbQ-Mfa;r3eLsPYH{uZioj$z=mD(_0JPy7qOCYs
zACxw`o$!!UdRQ!Z*tO(A#V=O`Zeaa3KjZ)t{D>kj7%DxYtpETk@C~)Xji^V(imWq|
zACpRtixrQ%Ry?Zs2}R%r*0<s@2bkekC<23_(krwT0AK~aAyz=%lR|i<XnLiq=}E<Z
zr|3x~Sik3ArRY^Evl%^)A0<8iYDHi&9Q10f4FI&^8=}oCtiE1@1{aoJuNBCyzh0;4
zHHu%a==G}8i}4#wA;oVrg*3m(6g+;j6zC0FF$!-)AtsnNp^%con^8#9YZcEbnuW9!
zzr_^N{8m%&_-&>T<F`wJ-lC<U@K!Aig|}&GD7;-ud!6EUD0&B^rTLwv;PJamA;#}E
zg%rO>3iM7b4TX1UX(+r~OGDv3A`RX2_vkTxFLpBg4+>N6`y>>AD@?lY7YrB*m=8$z
zeo%b&LDy&RSNxBPzz(b*9UpLj3I32GFc>O*NSg)#{=#>zzkqs5bUam}<Daw+V0|6e
z3x_FB@P`$FZV>cgtpWg4;5%0Z8umW7VIP^(_>W4pkBJ!{bItgO;*To=JFvbPA9a8k
z{)8eh7%F{2+W`P};2UBG<UK8fPl~2bx|*I={LhM>R)RH}_*05Lr7|*I>mLk+Pb&hG
z;h;}zZ2+JR-w<sc!PNzKenwhd)DNGPYM&EZKIhu<8O8si2<*W6t^TY7Oz`Iwfx%Ge
z^V$voumj&vJBq^U3t~r+WnYwPUlKdM<l6BC#s8`Z?7;eVe9-}B_{)mGV5szEZ3h6@
zfp3T%koOfKd{s1k)z$PB#s8-0D@w3_?|)6v*UapWA3eK&T@jcJ2Yp>@0|0IKhG;7p
zUf-BAyuK;bz9qJN%eCbjivL{^*n#z1{Y?j$;BPAegQ3#5wH*Lp2flOd0O~uU<2xlf
z{zK~k*4J@^aG3H0e^(Lc20`D|DgZzQzH?QShSm3^sYSWv`%>)(V#W_#Grp(zKNW!;
zSl^8AJHQP8P!Sjmm42x0002Ai4Y33Aek6n+i>4pDntr7CzZCsQ3D#)hpD6l?nc?xH
zXZW8g0+ZpOpK5IYpbg&;Z6!hcGii0v@cOw_`-Rx@3)hyPDgJLoU<cN3_0Jt(f`6$9
z42DX-)OG-X9r%XY@m`3xo2Ori9Yw?I*HY~_V#jY>JAS43e-wcoSl^CcJHQM-qX-O!
zO3!FJ0Kg7>L+pUO-wNS(qUm?8rr#?5Uq!!Fg7tg<_lkaRW_kSRS^f`-z+^b+4_X@l
zXu~%|n={M*5iKsv@_!P@&hmd&^hd?h5t@$3OnSR1cznzhVtj`wr1-cL==O+SwMXa}
z3NgXlfkH|O$5BYrpA^qTXa>?!e8Lpce9{y=K4l6qzEcWxLQ6y8q?U%lDJ>0!JGHbw
zD}GUgUIb||zRMI+e77m2`5sfi;#dlFmzIXY-C7z7_h@M-yjV+HD&4g-thca~ju2cD
z?6B{R;Hd9Dd<v+S3S^I}UM7(3#rq==`alGJeIVj&iCz-n2O|W@!1@scnHHGghav<9
zgP@1B`2b)fzH@yF)Wf3V;SwD$*E)dpb-*?Y%<v--0)s)&BU%Rl=)gBr$D^X-QCG*k
z5q>N}=o4Ul9q0`U%<$t80)s)&<5~v*=)gBr#}lICi4q;J&^mzib=)5j76Z)ilMw=g
zLC}+02LR~6H%tdr6`>PPADf=`B9X}iwyzzY+#SeI=8R|8xLe3ft62&8xIQwbI46MN
zBqi<9=`=>6&MqQv&aFc=PWrt~5P)Fm=Omhq+XH*+P%UVjomO{w4pWig{%LVF?ib2%
zr%;0ng$UO|POT#+1qdh5B5Iy3`Z9&WVCKSIh%Ym0j`2N2zQP4aO)61N0(b^Uv{DTk
zn~A1krJebVbY_LQ4wCDbGQU%CqrwT;mY{Nb<eY$$AZB2e(em&vk8u@2$9lL0{Y6RX
z@M_cHy4~)<JZy5VB_?>@SqQ})43)XnN`#2MVvbl5M{EM!ioSP6%#<RIme7yS$Sw_6
z8ZnX^peRY?=s=O%p*m6&K~L9GT?!p7orKWxxX=~GNnnvqEua%moIu}4Bl>DCY5N^P
ziL0qI1!X~mgs%xosO>inbAl-;)*zCSegGM!A|w0&O2S<dGSPlB1o8;d8U$%If;9TK
z1!=*`p#6U}Wa_AnJUvM3Ow82b2CdlLxE3p1WleCyS;S1^S@1Mbldl<fX5#23Za#}@
zawTRAPMd0JZaIr;xcc&|wVG<JJpU}JEuaN{wFRcy0$z9))fUkrzuF>GZ4vIjN-hNn
zt#$fUhEXv`jL<O4fnkIjOM=lB<;?&Aa|(!~wAck_oMF_+q(_Y+qG}m6)j@hH86wZf
z)jo24WsvJJSx>7W+!f33h>R4ZJ{ed$ZpJ;PTnLv_92dJqc(9gJB7qo3Qlto#VGJiz
z)o_9jR#P-pQ<@q-B`*ma-B9vZBUtS5=NWxR9g|CDNe??<U7#(RO16@uY?HO4nvkUO
z)Vy+YNpt#p(ZZmT{F*Vb=OkDot43tWJ-Fwln=hY4duk(-ElBftcypTnW&VwPIoiP$
zR3V87Q-{uXsWPslN|SIZ(A8W~wuLIBaa(lX;DR1#B%De=;TR^EQN~f}b<7KX!od`V
zi_0pi68Zj}CB$Q%mKgQm3y80Rc+Bord>jx~5ft|?C=FAXc$wW(!Q@^Lk3KWvPdkT;
z_-bFgVG0wExhx0<bWG7Zg+fc^-2f%fWSL*qdr_Llsi0Qb>1)XZUx$X*Wny;b1rDzb
z@`U=LO}e-)@Aq+?OgMF6R@TaMiI^-ekW;a+V7wd1XwB$pWKM2`n#N4pssS!fjd{E_
z@G@zmrZG^{C~A;O=en|in;gSByeQrr6buDiSbC>R2zC|TQbIu2OAqC_)xWlkGu{Ef
zFTB7aaNRGviA5H%m=?=sg)D-JWiQlb5$)98=@Fk}5ld)Er`Lh4gt?-Q^D<i2>8<7t
zi@=q6r?(OJ-7U000<(n{$^6hlBz@XKZL(KtrE`SVN=t>-N*zLLrRDlWayAp2J%P*e
z1wGT#$8lYndCHrenVI30)J!X-TC)+^Y(zF2k!d3`ZA7MxNL)RNx+cQI3C9tMhh{Le
z>tsoMx?wH&GCcUv0m$X#$=J4T?m$1v*!4_!<PisWZi9Cfezb{Gv9NRDf>Yxq1nbm9
z3BfuwX?qs~Qk+_25&Er73<D)_2wvQXQ|l}OZ)(J;4HnTzjpEcMi)f~1acbHkTBt>w
zS`tee@n{5VW${3QQsTITyy!{cr$Kr-DUU|5+^4kAlGG}+q(lLZnT=ByQjKxy!oaDE
zs7kzv1ZG558Ie^+WZa018<BA%GVY5slgk-01Ey0Z3|VqA18{kwywlTw{LDZyWT_}p
z{Or4!DkU?Zvd#>PnZeZt(QQsP@I-W5o<Iw-L0cfYO+>eah|YMWvH9kgAvPNd56&tP
zqg8&KF2k=2xBKx~iTE_}XVQ>FKov!1d#0%B=+%hgvT{rXm}rmncvn(Yk9Rd@f4~9u
zbqUNQ5g;huf0@j@W})Cjw_YIl&64P^#>5>oQ?z0EYvtEC@(mX9u{Nf8W?IZ+k*u&W
zK$KEs&f~2*lIL1FXV==I@euk?o_1lTL&oKD49Rk;uyZ6(jd*GnnVmpv`%90~=qS{Y
z^(v>~!n6||z~w284S*fFfc)r4iwot#q#(nOj%KQs=xElDrohlb%@{i#&xiE+r2#TO
zPoM<>GCz<$U!-GwI7d2AehSAz&EK}l%Pq*zjnlbjk%o=$#XE}~Sf_X*>m72uW3ccJ
zOVs-7D<Ymf%5C~pn3+5=(zv|{j$%$wyd>!6NXhhE=ptT9OC>>jv{Wyq(VS(plshEW
z+VPsJgBH`0R5f>+<P$9DFn>g5t;q=FLd5!b7`*~Z(-r;T8se3-62Al71yol*cO$-5
zVXfK~1l;N(>pecV2#)d^u!@|uXVIc{v`%IokJbe(dKRtY_0)woS!-xLt)jJP&<0w}
z8)-G4M;mz)b@TZMhVz}Z*(N}Kn4f2hHq4)`QH%;<0r8x`$}@!b1*p0iIyXDv4P2f!
z=bg3#^22+x1mNZneRKgWmhirSNX8bDI+(W51)RZe4>a_YhImh&Knp^=C(zI%8hS!B
z0Ci#Dew~7NEBtp6{I@mm-!`Nqyy_|n`inJO2>R^~EJ1$>9xVka?-D0~3>NXT1CPOW
z1R5@-i}*6SlrN{Pd<9x{h2yKs0r|eV%oeS$o~Kca3Sqvwk}jdGNJ-B>3!m+Th+WXV
z%kdd-dD@kyi_vy}DZ4A^(OuG`yF&ERPTDR$+bOTa#9vp@PQDtyyCHpdslRsT3ADgp
zy94RFMf&a#=|H^z{<wy&De}j)8ZP+bItLbi?4do@AA1~s7%cp8JzZ~;&>p%TN$3W;
z);3#`P;U_&#XB*DcVd37KSL7QOMB@C8M1qWA-j+E@_v{@y>uh(r|T$-F`EmbP%owu
zct{qZt^H0O63DT<K!x=OG&3DA3F#o-fk{H@GuXa<JQLf-{j`nxf`}U^Y78EK6~R$F
z{py2=As#-929Dq{*B)u$NYKEWNuGH1(IDQ5jp7m6Fxq#BuEW!=0UoC@zJ>6(3ej+@
z6X)_6%a3z;qGgIU&gF3yre0JCi}Tw8E6<RV4x{Q3=se=YH*k46l6Tq;$j?bfBm$3w
z=)+sC>m<Gp(;nj@KP^oV9(sX)5=tgZ!+bJNpao$*87P?)CD_D=HEPNP1dc+mrS}r@
z&<p>tqj#7SAHNd|wpe7c0*kYG_%E;iYyE{;1;&X!0&pg4yvw&{$YfpRz%ow!vv<8z
z!|vWL)%1z{F+^H|s&Jy5k`oPk>^b)ta}>LlD3#06G8ShN7W(adzyE;0a7qxo&ad&e
z@pzyQCbY32!C&a$gcE9UrhIm4u@y0DVO&3_;1i@GdpLphpX^v=yP|C~OkryACj>!o
zm7X3HOq9PJpm}n7P)~JodQgwGV$lu^t3zzqogG>eHDc@CghgG7TVP#_V_g~$;$IAG
zwng7kp~$)A+zyIU0$EEAwQ<;nTqcWTIcNij12sV=urJ42n&#Vs;{}w&dd{`X!=ro`
zjxshLvM~uiPILNziDi%E5$iNS%xxJD$C-<%jTREg5uP!zy-0z_=M=%w)359tWwNqs
z7iDH;hgID&H0&H&Lh`#HRnv-19=mVtpTez^9Fyplm<uCE9!?I-QCWxdZwDOGf!+&_
z%H*-Voa1!*xxp}nIic{_9veD+Dud-=aesCd!-c(hHw3PN=2gyR<_bW1=GFSUG$enS
zAWdH-$zxTB6+o?qoYjt;m4FIzx-}HYSuJu_hseR(X}tBv`cz?Uitbzoq3fJ(SqsQd
z*=uak?;@cvnCHgna){EmTR6gSsEHr?$Hy^0ef{6hJ#f8PGV0>(vN2AT&)MkSdkT3I
zhcGqBmuEU(t@e$HW0<-(J#$l@bJ~lG9+P_#(h2%z&gnJT&C1YD`ffB#=!=2`Kjq<~
z2WpW(&yey8mE;K1sI3vTHE<RVK5Y!kMVhFUKaY~=F-&1<ar8;m*dkXKLbDElJu0dP
zV2_7Nj&{7#qn%jk(N3!LXeS-)F;j8bTgvfJ$uVIE@=tKYge)V+LnX(A8<9ln&3R3|
zIj^ZV=QZ{Eyau-&`kbdh_D(TC*c7JlgM+{{m7C+CCDde&hnA4iM}A9jONb*vxh&L&
zO0`B}t&vz~B-R;;bw*;Hk%;cY;eZ^XM^Y)Bh;UMAeHfC6948`WnMAZqoVW~5T;@2j
z1CS)54u6yxrZ6XVFj5+JvYi>5h*rR>D_yU4p<*{QbUTS+IiNzKSgD~PQFKe9kStZO
z*F-8%e^cTrl>>`c(`qCg&m<k#hE1^z;WdE#d~>cX`aK~OWU0Bfp$AO9X~mqMlEfo7
zSh`Sp>5@1pw@Aus_Y%`e{+_dY=h$d2JJ>gLpu0!^&F$XawQIWN4?Z{1zTv@<zJcsm
z-|!ISMn{K7X>5FSD7T-+hG}$sXsmBAM>l2r2F6Eo@ona}qixp}Td&weefT5Z++c2K
aj0Uqq<5~P=MQ(6-^frOA2Q;Cr)B6j>N7XF=

literal 0
HcmV?d00001

diff --git a/src/wasm/Hacl_Hash_Blake2b_256.wasm b/src/wasm/Hacl_Hash_Blake2b_256.wasm
deleted file mode 100644
index f4caa5b75b22f3b7f62de05859d988aad90694af..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 4552
zcmd5<&2Jk;6rc6(+Uxyr*3JhZO_Mh*NUeZK6bMo;bbF|%$_)vQ?%LV3Yp*|8uaR1%
zaX<umflF^7ap<iQJt1-K5ee}x@CR^!6XHb0dpjF@yono&3sEBP&dl$<c^|)dZ<hIk
zj=~sI9({Ijz?N?-`^rB46h@=l%1C*^k2)IBx{i<ZB|k2v3j~?Gjlq+tOl&fNJEM&F
zB?}6_e~yM(rCWaNcY;Xm1f2qNt&h>}4~PAJFHV-E_j)5pdQq_bHe)j6+)0pn-*2{q
z9j3_0DP?{X-s#p+SwV)>WMv&cih50^%77^y!l1XVFinQIvW_J7`%IUYPtfq@HY>`I
z^RkBK-Jsd7H-@{rL0lh%_XAdvv5PVm19(4(d(1crsk!4PetnSmaiWfx`t<?E+b)}y
z?Nk=rYHe|;_!UhlBj!CetkQ3k5A*!{elx0HkNkFUrBT1~&UU?#y@%Z}Vdlk|>%%_o
z7u4IqJ!ZXn8mcpll91W2oQ8}1LBgEPnXp~7YH*jitFsWS`uee@sORrcT#^9&Yt(n+
zUPlZuo3+=D!3Rk%4kUci8>1IzTb2=MsVuU9xl<MM`6J4~cXT8Rh5h2lW0x4?WzXd%
zm~Lq;9eb{)M4l0)gO*$H7%y=HN?>dw*aGW1dzUdVJ*CY}^%}mgTiUf04Hk}!s0G$#
zCV{3j*ov-LsD;}Xna5hL&Y(O6u$~Yt&^|4*uLO)62>w<{(IrIdya+lJ(ID$*;tis$
zu{Y$Dr+9_gPN28mf$}tUgS;`o<QC3w8|-ly4it(5{*%6Aad5!#*j0>!O>wX>4z?5r
zho&(O&KpbuO}UxH!QmB9`2tk93k6<9Jyl7MizB6-s#+MoW;$twBUs8#rc?9S4Q&Jr
z&;v|K#VzaUfQf;%1{HMH(_AAT`oAfdMG8IDYGq%|f{Hoj7TRWkesi2_Pidi==qX3f
zPY$`k%Yb_jZ!lh9Q@|Y&@YHXQc`k*xg>*riq|qK<m_w-xE~v6tP6_QdjWDJV#vUx-
z-_*{B`H2U)^)C*+_nto7Bi6XQ3K|e|Qlg+EyaoysJO)*$x#cxnQ(8zZB=9&CB1s6L
zpiZW(@4v*@YlVFq<8F`Rj-<rH@*+}BR1kdyDRJAjNSH(>o2)@n+8KcaXv&C;KsKg}
zD5JGefMk%Q6(p&HB-Q>KNhekw`F|>9YET1~9@3iNOpPz*(!D4re~&NC!I|YbFe|W<
z?Yts(Ug0mzA+`#unb@ijTjej$A+`o<nb?{TTjS^E5L<`!Ol)0<t@HD9h;6_|Cbl8O
zHuxqi=i-XqPEICLo#hY)pkreM9-}4vL!-x2><}l=oXNxk*p!Ed(lJ`*ke(U^0313s
z&QW>F9g*i*=W>r~JupOev3t?8PbURSU)qsA!2+1JC>vKe)@ngFr1PxKh7<=IOBFtx
zZH6^8tIDoEhj0}Syh*}Uu!L|8sz}#Gi(_ceaoiQp5gNglr^Xsbs*(4gVkud6ZRA2W
zTmj8>@)i1*ECi)-g*d(TQdqKfmo{&Mx-V6!TcS&d&0XvZ)Hy`+&{8vO(A|pe{rS&N
ze?73qamaVp$=Rkd?};-j)p?<-Lb~XJz~rj;W9Cme^QYuJ!*wvm74I4L^0c~=pP~&<
zx<3|GygX1FHmKqm>NR|sTt~3`P}HHERXhPg111`>idUu^5c0?LKy25d2yq9m9X}LK
zx_0C*DW<1!3(?Qi6rJ-jk1hhXZAAa7rRXA>q;VYypwpG+I9f^3f9Ug|b+n|2y1yx<
zXv`W{xkmIN-PV(b*k%-Vky(!ox@&Gpm+h^i%XW5|Zs0da`zkbhoqibkN!aT`5XZe3
zl40Bpb|C3NJnSZ6CxBf)jD~Sw-Vo1A7dqi!5O(iCa<3mi*oDZy*Bd6#@%vZdgRrr6
zvzvSrH1TdrZ*kZigw4U$r$H0%#1}5JnU>LKz}d1$d;^>*o6d$YFZj+m8wSq)vN{{~
N9i2w;E%(3J^e^$X6K4Pb

diff --git a/src/wasm/Hacl_Hash_Blake2b_Simd256.wasm b/src/wasm/Hacl_Hash_Blake2b_Simd256.wasm
new file mode 100644
index 0000000000000000000000000000000000000000..2724451529574975575f03f42e29974896d5d9f5
GIT binary patch
literal 6794
zcmd5=OLH7o6}~<1nVyka56g;VOXt|JEt6P@ACVIUEVob`KVuRJi^OuqEvYTddomi?
zSe9lK7Y-CvR8g>`D%(I6-cjt>Q^gWC`~=ugSx{j^72oaG+!@BQFdLU1)93WL=lR`p
z&s<?`lr$lP_Vdqocg2CbT0?8#U#Qu<t2MPf{)L-O`96w|_J^3mk?&ehfmDmt((Y-3
zN#9-|75e*Qc%{d$Sv9L<7sDmH6cUEM2&d;atJO-aKBzorm+g9`XiuFJLKPY9CX%gM
zD+PO1XsTqJGOL(dD`%lBq>A*b%1TzTSXmKaRiI6W>(JRVBBF{Is*ZZis*0%kx{HR>
zQzE8{99A`~tlKMv?DFQldv-0mk=wFGT$LSAWs$%wyH*hiKc%Tvt8QgC>Q=2DZi?{F
zc9HLNGNq?8J-tR>|G;4E$C9Nrh52<%kN-~ABYFI?wNlJpwl>zYvqh_5pIOe%=Sr(*
z-kQp8R-w?&mNtv^oY0RwSFmVp)J2+}E$8L9cl_BRHG9LZ3u6x@u2-UO50TZ}nhjU_
z$DVEHUd^_}z=>xItjml%$ko@g1^b~GOuD&6MZ#L@%dcc+e<gQWDAb5GO)((^CryJ>
zlrrKY4s9ByRx}euhsYZtQ*fLU6sN=#gmuzq#d#qpWoiXZg)ibkT#H<Eq)7*rDCS8&
zDHNeL4QVZ^>CnRIqrwz<BPuA_CZPXO)))Tifq&9XQ!_))b_@QSyV_I7G3j4|Qmli+
zX-Wry?P@YI{7QZQ6<LNdrkF-#X&G4>k)@T$GLntRGN*(h)aJMsS%!Nk%)QjZ28Fl}
zdioSS1}LrIsOk+KJ)QCmLs-eTr!!)TOOYlCLIg;1kCBWzgm4LcgnAIH88H%_$^1qp
zGZtz@nzEs4jj)$gRE>4`rc?AvP(4%2Lrw81Bl7MkPjHf8Lm~nBqO?qS#!a{rw<%0R
zrZ^7(z$b;;=H7iMHOQc_x~xNqwAU_?Xp=~6Q!jqnEI;XF>~y^U(Gzp#=})#LY7Fk9
z2ub8PKtUkfPa1_xL4DM3BuDU0%L7`1Td8o11QD6k7F+-F3*<f$YNV0(bdYy|67b&(
zlr0q!zAh+Xw%`^_0aIFf15l<t0J%bKPIv%Hqg)a)kpdRTH2`T3K$-zaBQFNhmY06~
z&xK4s^^<TmX}^n^ejey3_kddbZ64eQW)AE_W{8G7%R{c^AwIZ|Wy3V=EgN>14f8Ae
zST;f<-m(#Q*$9vBW7#1(<SjeoE<41B_pxk@#=K=??y@l+rvn{%MYOG*38;$>i3B0A
zKmyxn5I=A_V2LMSz?~LjhsIU%iL#9jFgd$MBchDlH5tD>b!?Gu<iU<!ANPAb2}qW4
z9?8$D+$m^%&KdI=8lXIMqLjkn(Z+{plyn^!M~9>kg|Uaz2D+k8W+-B$J6q$QymQdE
z4cDHFVCj!{&N$~_LLOwYJzR6!vuoT)jgV~FZr6@DZloz{WTNIj{{8!Z?&#5dm~ze|
zXR{~`*45P`b;+|<sO{#b9q~lrDaR-#TaAD88DWl7+-)f_*-~QRixg8@N*t7IDRHl*
zxI!=#=7`KII=|OaFcg>(PT~~*h2|XN2uhLC|JqSnv>)jWAT#WApb3$HFOm++b3Kn?
z0Aof1G-eODFd<y+zYcZN{@Y&yID_uFCeOAw{*w5^NZ&ZoK_B(N6^93|1=oX<!oYDB
zdOF*r<j$=~(+o#}5tR4wtoV?Uv*H#o2ioU;r305CFep{HA?xkJQ}N${r4nC8!qlUs
zH8kBoe~+Ty<w8F?GWGf`+8_<bk%1E^!~>`kdSftY^@H-}h`!*_tr^NN=&%?IIEti4
zDHhZXtOL}8sJD5@N1;;yhj6E&)*9I*c^I~b%|tLKfS0FX96Ld+KzVo>mdE387rS(j
zGE+EV57N+iVL;#&I>?wfc@)}5&EY`1oC;|)Pk85L4wR>TRBA_8>K4@@cW<FRc*7DO
zcKTHdU1JX2Lf^PT%jytOP#sdL<8o0QCJuHr5kKAGBQzEy0nLMtw$TxO)ztWypXSc_
zRY~)4I!;GqS3d5va71{5j`M`X+EIFyCTN_F89JZzV)WVwe+zXCTn3O^i1qkz`Ze^V
z*9wp-^Xv3FK8t(`tWyP^1is!t!#w2!?#(vu1)pxCBYXy;QqS3aM0AeM5y^<o`4PQE
z=Xi=v(P=tEQ}hO%ML_3ioZqH5`5k(jr)iSk1sL8{U_K4XgZVqbV*vASJ1oY6Zh-iX
z@8urwzJR4OusNf^8@xQt<dxV#c>tV|0G#P!k1o)-g!cu22z$$g_(i(FCWL2UVYU<E
zvw6ZhAL6sVg;{A~wu=R@zU!CYX+b=P`n`wx&H44agqHL^y$`J|&|h}w7U-`iv;_TC
zx{8{*E$^xVAeRgLT%#*=&9`u!-s2l|oj;&CzKN)ADz*9mlvk@8!DCRX?>Q{Sf^N0?
zkgn1kTGB`RsM$PJEWq}HQZw-Kw2-IE!EOZQ)oej#bfJq~nx`wWX7lusTd(iaJl}%g
zBD61d)@w0Oc<0w^(bv8xwJ&zj4%QD)kB{l&wt9Tx(5-shR%ls|C0YvVv82?)<)R*U
z=uXgtmgo+e(5LiC5VmYW*)}@DchQ6_Ke2l>p=Da8Pvw?f_P6W`E%Peep)B2{Rk}?U
z_N?ur(4A%Y9=gd~LFq%1Qao?Hh4*WYFs-=_X`R-gy~sJ2n=5dhE^&b_QO-wPu`L+E
zr8YXkWr)h2RM|&>RjSgG46y14_!(8XMmZ``nQBy^3gX+K+gzt2Z&ID_Q=T6H4G$EY
z-v{O4d^30qaQ;Jw#aPe{=MQ}^_vlGmSo#PyA1U|-FHev1O6;IKd_R&1eALArZP9Is
z?=4z#OXRhsZQ9};$Twi5(FyZLp773xdBZo-kVYC^jDXejjw78hW(u7@*9gCuGL;PI
zU&!ecUxs5C&iqS=81{!Vjw<~MH%)<gIU*(%TJ8_;rpFo4QVPnP`=7@^m`+LDUqm>A
z$@C9lXLiJp$q|m?TR{|akp4GsGEI1M;r~9F;?k^RQcc0ypCjES)!%s5FiOhr7o37`
zI;8&+b)-9nc>Q1U(m)eOh5mI+-ttZQx0|@xob+xsmvQ4;n5LCVshTTV^<1S)cCA*a
zQGK&kwpXcMq1tA-o-5gO&&m}yYj*0A`#Eu>l-t<ImDi~LuxeATOhxNqWwTBtt2#~B
zbIX%g%Jp0JO1)B(PqkcmBe$|KdEZ`v`6DNV<I#M*W?LmFY4gp!Bz@Vua_(hcHmBTs
z-j~fQdA0n4J}umwyE#p{QnhH8?6O2tdDAKqZu%>=hmx|^oELi|sO{?d1wHxy;Qb%}
C&7O7u

literal 0
HcmV?d00001

diff --git a/src/wasm/Hacl_Hash_Blake2s.wasm b/src/wasm/Hacl_Hash_Blake2s.wasm
new file mode 100644
index 0000000000000000000000000000000000000000..0dcaff92fc840a8a399b6410fb14041d83f27baf
GIT binary patch
literal 14005
zcmbVT37nM0mH(=zhw16=8JanmVL0Z!fq?;rLr@eXNP{AXpvdLP&_Dyz({tE8gJOh1
z#5-Q`b`x{QT$s(qxXI>N%;wr08?#BwCYxhpVxqZCG$t|E|5bfob#>3D%dS89y53R$
z_p08j<EyX6%M|;S=XvVk>y8}pT6U_#>M$OW(b1i1RK3QdYIL+*UJb@!VM#&D#%YeQ
zN+qi$Ec{ny!pDU1fNixR&tDJ!>T1?!3Yq?FzN$alAMsL&?Qnnd$k5PWVYtaD_w33J
z59YH=PW3!jWpbEG&roJ}Uv`hDT+K4DOul#DKo7tou1bRo)}P7e2X}i_u0k1xacJ=(
zFY2nKT#VsDX2`2{#UTi%Eb+#<DpOsA-TSk<`+9bb?A@C!^b~uq$$B-eZlkM<4!kB?
z81!OoYcsp^J?r|1@;$5anZE3zp4Ec`#o^4raIr&+)@F+PeNnNer)OVw7})#zGg~sd
z@>#F;HR|)L)$cu^EB9>n;z2~?doshB9@s4mSB-jAw;w^clZj+q{rJ>`hQ_AmiQ{e(
zmKycaM^vKbMM>A%X=4qH3;`(H(?60Q?)B=XzN$h#Qyli{TgS@%P)=wWD=K7**<r6C
zq>{1fHHL`x^zO^TRMW(<czX-ktk>K=R<K{X{HosJ{XKozgWkki(>Dlcm5RUcW9gYk
z9>X&dIebrKH0^bH9@nN*9H)4yrcKl7R9fZJ(Yz*dsYu%68jev7#g;%hL5Z}70>#s+
zkK<J<QSjDBS88oafQsgGlmKo?sSF{hs#O9goCLcL_3P59bwT|k*HaW6SceZPpX2c~
zUVKbZiYL$nZlHQC&QXKl*QHgC>q79;QEsG0+ob4gHK~c3Op_X=NsVx+(P>f>C}~oY
z-z1|HVv?G;nIh5A^k{Y3OYlUfv`{luJt~gkH%-THMmdV|#3fz~KP^kVBq?qsk0&7p
zZLo|G*v^~;O7&^ut+wcUArgkN6B@r`y>!wvHpi18p90V+>8S1D$vl-N3n!lg6r2c(
zMV>}e4Mo$$<tfs)Q$kFD)ehA5bljq%Jd{r5na~U-w*CH^snkIorr%i_!+>O`^&Oz3
z-#h#k8>J9;sDr1|be=)e(|(w9+zBEUbi+&#wohiIfBlyyUOJSj<u2+XN_#oKD|oh*
z%%M4uC@Yz3CEe5wNyJL#S;>5w4@s4kEU=P=v=EZ0m7HQFi)axf)mF0DN=~IyAsJ^S
zr&-AoS^`Opm7H!RXV4jt#H?hgl`NxWkkneqnO3r#mO~P^lGj<u3R(e4!b;Avl9jX)
zlBAWStz;Fgf~3w$R$Iy0bT%aQR&tJ&tf4iKjJJ|=DM|B!kgNr9N<gdwaaur}M>9aq
z_DM<C8MIag*9=-GgKGwzCxa_V3xXQ!K`ai4^Ff>*5F4ly<Xi`<lhzAXC!H@?owUJV
zy)LM+5&G$XxB$dC0dXPC1i93~nn@c4YbISFSTpHDgLPI=V-xgO2gF4n&JBpoGz;XJ
z4%RH%Bv`ZPBEgzPn+0n+Z*fz9tDE}U+|=Km_W0to;!DyIzBFCMmyt&o13efgmxI2<
zp|61c4vfkj&h&9P_t53SG1JEtwwU9c)MF^xDe>4L)5i`#lok&&LxGh6YNiZz7f``5
zs9S6?$Gd5lp=h_DW&|}8f(qX+b9~<e+HG4FRrbQ_UdQSl-bZ_cW2|OvvB>*rpP^{K
zSlugD_l8;B!@XeVfQ$^b(_t_7QLk_`GUq9uP92cPaDn@&&q%0WpmPG93xoFPQl~)!
z6%8Cjl_8iNa?B3!l{6q6)1X0H%yEIPG!zxY>X29+3b*Q}KoM45FNRTN1Xf2Jt3^IQ
zMd28$!?sxDtLT8C=qj-~B34JjtQPrdunz+Hpo4rhUqe?5$8_WC>GkR40X&+}jn~pO
zMncyL^g)3>7zSOQ2Zzugw}FRIWfW#d9kYk{h@wNnGA%l6i#fhd5rkSq*D0|)DweU6
z3d{4O3eZPY8TR#pEj$ChwN%dWF+~s?Asy53Ab^kOSOaK^oANg(Fx;HKQ7YXeF5Kk0
zaD(ET6+v6L#)%spqR6)>f`XCKE!qnZ@B+^eFMxNe2yPQgx4D*XRs04;w<^swF}z*T
z?J6BRAkS8W@eV~$ky=>l@6fhDz!sh%w#u{dPHC~*1$RlMyTy;YT|e$r{6<C47OrXW
zT@I1sdlW&zNa-H!1qgV7XQ&r$3f(JSxP#<AsdT@1alh-uy^0@D1a09OFYa@QB0s1I
z3Pwr~YA-;*3p_)-0Nz6)cvvhw>{@zA@jobfNNKL=_ctkelS<bdkmn@y`<oR(MQWkn
z->hwcfGs>jY<Z=v)gx$bX>auwq3q`Bt%@E|{5C~zQ(az^-);&qeupU}`JJZV@w=oz
zZ`X!Vcn1nm;k*-tm=xZHLXzI1_}z-$4YU|PY6?kyk12TkUQ>wj`=mgRYBUtyqtQ@!
zuSP@ReH!hpir=s3{Xk3d2TZ}^<E9Yh$4nu{ACv-pK%=2>T%)1zm_|e4gMx-``bYE_
ze@J0D{U?R>^uv+};FVU?$Atrh66Yh*y-!HQo^T`fxZ;0S1a;w>-1vw?<oKhCpkSo*
zQSBNC#0$@{@dE2(V&h{KHvUE15U#PYO>5|Kjz6vlawDXVYZD+~0?)A~(6A2+POy3S
z#F)l^QmTDQ-1wC1#wQg2t0Jfi*SPUXhbZz1MNlwOI-&gl0YC5z@dJ2Iir~{?>C>*I
zCl&vjq9>K+YD@eXMW0c!7RwXE;IoRLBDF9KKC5kkfGs>jY<Yx0m)iU}X>(aGd|s-3
zK|J|_>&fR7|GOfn3)i&y^A3^YFDin9k<u5nA0Xfdo}qq}CDoV2k224`EY-duetgCC
z<4cPFLlM-4Yy9}KLlpU|ilAVm^i}N#2>5|#h#$avN(5gMOJ8#>J*D_R6+NXi*Yy3@
z6@6XF0*<Gw;NMUL6{&^3|Aw{&0=DoBu~jj&zA0_~kW>yb@M)>`E%D@At|#AA{9lTo
zE?m>*ryU~4-&O<#Bc*R^KS00_JjeP0)_26lcPecBx3(c%V`ID4(B&L|R}th!NZ-{a
zK)?i^V@*^h)%T>SW%J7SrP>d~jUTvfd{6QJD1y3hjT_&0h$8<`5fqG+eyIHb0YC5z
z@dJ3zh~P(J=|`@mXB7Wf(KAYOwIzO5(X)DG$5U4JpD2Qg)IxZFqHTeIEj&YPRV46F
zrOjnS>p7|Rym<1w>&Z_Q|4b3og=^aUoI~XJ=Zc_Ur1W#`2MG9qXQ&@#N%afyqiksX
zQmXw*{P>mY$1fEBS`pNRYy9}7LlpTpilAVm^c(F52>5|#h#$avK?J`QOTTq3y`cDa
zie6BfYx@30MK9{r9Zy-^Us41WsfE6ON!tPeTX=@pa#r`>qrIio{bix->i!2szgPT6
zMSs*Q=$}l%<5x@}%6~S682?2I^e1f?g;!9B3g^!##H8>S6q59^;=d~Tt6tszW(r9@
z644xwuQP=xAC&_AO{1Z3BqC@iT!%tT3P({$(jOFGAEE0bvhz4*3NgOH6q0<SDPTh^
z1v-W*o)m7-XeivM(NMTaqqR$SwTJZ<w$Bkl*9P;>EfJjG-HJzvb(>K3{OJus*<QRo
z0?<1mi0T~?=f&XW2;UhYAPd*z4=^oJ<hvpS1;e1bwEH07B%Whq3fA3X<L(L@Z`3w~
zYiz(bOBDH@2tmOx=pJnY1Z?0LYU5t9aj$FRmI&V$A@qrGjScjMC5n81grHy;bicL%
z0ygjrwef)1c%Z_@gW860jg8wQT168@ekejvFj9I*+W-L@c!t@a+A?<H;iE^6c+tq=
zs!?@#oINE!;T`9A`h-3Mw<nw@Jjnz6FGuNPPRnrumG+i-9uCcM#F;=taC&YJc$31$
z2u;rO9HlZ>{6pO&*HIPD#_Mn%9^n*lQVz~|P&g<SsCrxUF-5{q9`kxi(X_WFI!YdL
zSy7zoQi*C!;AmfnL>o@m(Yk0VR@ptT$mbMvPZZsw{O-Y50MkCIw#Oh<(!Lx(^2e{`
zBt=1@Z2Akw_M`~mprlx9)2fW#;TYEvOk#Wn=SA{~!C@!vlkikU4##0%_xnkOrGyJ#
z{$F7!Avj@?PK!$COaCK8^$1ZNzE?zf0>U)G2~!G`AExoP=!Z!p3}vU8zKtsK(0dKK
zvlKO$&a#oI=$V%#GWfPr6C^EWo)g{xKJUoykokJEMt`!o60w(WIMo!#w-|K4XZcXA
zrbGe}LUK@qst~$ls;<qFbren2S7ybtVjEGBK~sv#ao9wZ*PBc;V5wxA&OVcXQ4O1*
z>eTpZb1aNR)XcPZT)6U4$?cfL<M^H@t(@bxvKkKaI2tE|5*e#Qk*eYvsxgCaoOJFu
zL}Q#dvQj&T?^$4D#MJl^FiJ2~#ZhUo%|F5cCO&J_Qmx?oUo-%ZPiE%xMXkWs0v@x7
zg^z>6+(dCq3$oN0r7-w7vOXm#Sq@)^;iqe2<>=!i;E}Tyz7CYY*ZJ^9DGa`j>ve<E
z80PtO%$=x8EtSt~ln_71Q@t#&<7LhOHfmM9dM!!V26)zxj@dyE9zM?HWP%cF(!~uq
zzmFSa4sHnZa)NwajLC30e!7Q&!uZT)ETgCKd5)W5ra9ee&48Dq=A3-0OSc*`&4HO_
zG1DAorimvyj>%4;rJOFU;jI+{yPQv|5TNVjb7>Br<IJa}I8Ua@=sSEo3m8*rsu-JQ
z8ST_AXBuP~Of1I|9hNbjrgwQMo?#iC)Y;`V@l4B@MYFoRR_?Nl*)+S$o5FKw5>1g<
zPLdCKQT&kf?Ih}uZBiS}5Lp||6j>W}iL8z0=p&7EY?$Zb%iobB$6_=$O~qo7yQzh`
zrAmu|YcX&w2Cmh>wHmlq1J|nIXd>ZDs571tzdL3IE)Gp4xwxA2xV0gaE|GERnb7AD
zV|;mzfi{^WeTc=urAq;eR|sst5)}d)u%zud3^wtn-ZJ!O=O_k25J7x=7VjD?1D}^A
zU`>|MOwAIoiI&kqEfTO+%a}xyBw!VJGKHt$yE#T3b_tY{pd{qub__pF60oFvF81)#
zDl!C6WJ!rICIkuCRH`=tn`#0!jcUyo+*$)yYv5`PT-?CL4P4y7#eKN{b;P$LpoF1F
z&WHyuN0f7V8I(WbNroboWr4r0wNs5W1eNuOZ)b*92cSEgQSJ$JM~)y1MtMg7-67B&
zA<${BGVh+e8+q4cHY~DfslmTm{KGw^!(2g6^>ecZxoL9GOfRz$+ggeoUo}d#hqmD}
ze^oUmb?ga`uJSff?J92@R#d%m8t$VBtPl|}DBib0RzI`&I+?8(UB6k9TXlp**r&xB
z_7WDq-oZCq;A5jf<7M`dokxlkZb+oCQ;3YY!_Yu-YeCP&6Kv5$2>mBF9I(V-Cgo}@
zqt!GH`4;TGaHpXjt-e<7GlXYHsSdXQq7@-*b+QAz9JOMTE6Hjm>ee8HtrEi45WARb
z>Lfc_Xd;#aC`_UjecfR)&?i?W$mE>dn+OtQa)3Tr(B%$9$wS*Je`?0&$KM5G6io$&
zUhMR|gWC`mQ=-%E&=MUYve6+YI);nruw?BT15d=q4{--g17t9jdAPRV(oxLyiaRA^
zrsMl{2engYsxFxCgRK$ILNu&lq+==Xd_GKfVV~922dx3dnt<Ot7KI4S>EpS`l5TA3
z<^*XougnLJ=a<n@UI10WSx9qm!(jnh-AxPO)+yA^i)bD%rbT=z&E?Y&nA4mM%Bi6I
zlv`|zHszLR7NbH~a85soR67Gzm%`>!C)L2q(bAmL0#N=$v{aH|X^1`CnrN3)!{rId
zbCYh%=nOs+%FAJ4d1bmS&*7uIO}FKNh2>&ld58tD{CgF$QN?t?D-eaV5QP;%6jow-
zNK+c9r3751X*2mq0<LywNkH7G0GyeA&UVtkaFGS)&}v*v@TZ40bQYgWYj`cK;B{!#
zIww|ZLHV&d*A{K8&eJSLg|Jwy$IXrvm>$kQiI{Bw#75ZO=)?@X9Bs_eDmyYj`7zrl
zJ-RW(E^VOI60;3-zKPcbw1F>#;wGSPs*KmB96=UL51Rt?O@h8D1Rbo45Rc8Yxhx)A
zG+m0vR)>~&Y@=;99^0IF7%t+mownNvVH<77gm5u!vCWqK$0cQS6t^@KUnV(o30+DT
z(^k3+BkyvvpRg0c6&Tqo`kXmID93(D3Lkc8W!hmThaTzydY*S0?#@2W&`RD#D`{tt
zH@nMR@_0`f9px-kWq#O8J8_vJOWSA{?S+^7Xe(|}?B-tD&pFC)A0pG|<XjGvpL4yo
zXmc*FS&Rx{IoE#@xi)~RgRnX1<QjN68q7H@0OjY}pk#wws;FEnaHC?Y<k|q5rn}Og
zB8CXpEude3kwRt46><bwkaC5<NI{GgLX3b_G-1VS_pn?a9H9|;eRbSPJbg2EF|i0v
zq}1W@_-B4~UtrnB36E58CJ@}&v_8lLQtQx?u<`;MOdxS5VGI|^lAu~n;@uRxV|(7^
zzyE5h=C~`0ZG4ofv1iab7X8kOdzpRy<<8POYjAV39;q%nTBC$EmJs~CA<j@>7I!{R
z&Mfxk=2h7ob|E0xGT2vTY>M=&GWPG<tUVDMV;0v-gUqUYRW9kuy;2%4{x{0LI!cY$
zK$X3cV9SKJQ@hSb@y^-I*vVk0lj2G6Zj$3&D=3Tud6#doMc-4A;AUt@2gNCY=|c|9
zaH@kz1qE+?u>aJ|1YSb1!KBIdXle>2vB_~g^Kj+UrK9@5o6;Ul!{(z6*Fi~6eT*?K
zQ^i)jjE-V6lfa&3Ix&8yr0Uz!#4~7y4Io|wWb+rEH+Z!*CpFjs_3;dJ(G0XFIH~Cb
zCC3Pz{!leaVcpWnGifGvNHfhbjckj%D%_l1PM2OE=KydnEaM)aJ!F_Aly+O}&(@S&
zo0aI#m9acG#0#*xfz$2abO~i~=4dLw=@y*s5FE5*9yVC>u~%4t&Rhu0g-)+50Oe1h
z^KH@ZA(1eY$BwL-*k#CxF5a;nVd6Re+%V?9{QTcv%yEspVid%u%iCM3dd%C{&-~-N
zZr@lO!vYt=ZK_~^%7KcLDy4&0b95{HpO7{t&?%DoL8{0rl?<h%A0(rMww4h5DF!F%
zFpF2LlQWC+05iqZi`jaZt#{1g&`g_^tLoyLQ3^AQb2X~Ndm65|n^xjH41_(aXatcA
zBo${1Ugg<BwDN2rR(ZCNtUQBCRh~gLRh~h$a>W_c6pZ6Wb7;_L4h<U3p+TcQG@vGP
z*w7@$LQzmSWs2bkr%Xw_hvR&rlO~$;iB3{-pfr;v$Z3&>AAO`T!N5&0a192o!N4^b
zxCR4<zQgOO9M48lDVbq#1ZLwmc7`bh9n(OUnPFy0{AMA3vz+*Kfszs4<&Phu6c)cO
zd7;9~)a=p>GZz8vb^|&O73agkd}jv0%abz$bZaV@0p`mLFh9f^SPOuIE6jFAnyW7~
zOM-~M^X&Cz>COd$vmgWq9^(=^UfDdnvMF8+%*9UY7lHC;l2dHa?+KAGl*bOB*=8nb
z!|IClH5HT3MY<rb)Q);0ag+SPjq;{@i6ct>U}gU1;X*dk-#f5xzWKAEV$b45^W`sE
zmeKCP{-NG{X1I57fU<?cV1b553Io|aG(1R!k%8gfeq7Vd^yWtj+4vgs+tsn<g0nAJ
jM!onuq-=k7V3_(d10$I{;qR0N3kQYD?9+md+1}p))j*G%

literal 0
HcmV?d00001

diff --git a/src/wasm/Hacl_Hash_Blake2s_128.wasm b/src/wasm/Hacl_Hash_Blake2s_128.wasm
deleted file mode 100644
index d54b7190ac2edeefe192a03020bbbae18251150c..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 3629
zcmd5;&u`;Y5FY<Z9NS5qv|XX174x=>v<RrQ1y!i0HHV7Tazlb+)=k}AH%=UE2dOJa
z3Kuv47sO?`?QJD`;KI580RI9P#03d);)FoV*iQ12HoFxUT2=A;@p<0N_sxtS_-Ri8
z0OgxcPflR{zA{oq^rMc)_m#2oj2~@0=6!=6BOXZee4~<Y@Xl;B1@jX9&0d==(8;u1
z{IUn?Z=aE4cJ-d0_`M*~dO=SG*ZzR~es4G!#7VX$jobS{7DvI(n*cK8#fu>Af#2x{
z2cXEvIb(hlKIpf}m?}eBvay~YMR5l-88By~aOjO~&}E1#+sG1s0EWDMfrZ;UP>~_m
zWec5$L8sf^A08eCNjnW61yGf-t1^}dd=w-xm=j8^13&ZIY33)HHU{mh6XM&cS+-Me
zthmkA>RRP<UQ)*3Ju&U-&#Z@a{T;s(wcn2XZm^xUZ*RZV-Y>^tKg__|T)ICT(0M_-
z8ytiE^7Bx=VU&g7+<YD`^3x3JFD-=~l2_?Na9>!4faa^`w$dz40wyL4D8RIx#l?ig
zY`u049mW2E1U^jS-W0yN+_N%))@p?cq@AW%zkJCWe8(lJs-w!-gIfTi=DET`%dKwY
zbI<jZ$kU^Ia^$KWh^jELislZ1ZL~e00a~8Y6_$2~25?WmlcUk5nI0XXO?GFXISY<r
zC^lIU&L((p<QjmrIe`6?(}MidOaH0}!X)rxB}Z2YZHNjQSfL~A$DB91I?~396ww)+
z3d=jo{hbDO1{@jvbJ*u1bI|c%mm;${GMgf!NT-oGyiAcfuY&}dbG(eq5p~o=1M6Z1
zRpF8ySF*E0Go_oGYD|ZvP2Ld%miR7h>K^RrV+2$@1ud+*H6x#p3_P(nuuj2xx@#_y
z`4^K(^yoy7QW8pv74e~QccPre@g0$o==5XZpiWa77knL2L<*`Npu?wa<WNEE2vgLM
zjwBrlM+w5lXUq1tERUv+EIL2u{Zg{b43IDXNtT%fNfsteXY!eUlBhuv)rE_?Xp&4#
zNhX(OrA#Z+VJQ>u2m(uCmS<=P#9k{WOTkt_)>N4VX8tM5(4CYa0#V+lQsn2LsI<Mj
zygnrD^Lle=N7Qp3JxYbhGnXVY#fA=7Am!l>Q2J1$4T{t?F7a{`n`rUNO|*-bTi6t<
zxN3_vT=RfWB?7s3tk2DLag7+JR6&h0Xx()-rqgSvyY<E7{FRfFl7nk^mK>)Py2@JQ
z311j<DaE-6iaOA9vo;!T!|?w2`^P^&u?;$q%L7h%mHUvakNrbQc5TvAJfxmxPFn*O
zNs&8IbcR`rp&X|U#ht-q<xNTzlie$O9#sIdfweL{3J@JXvrwi-joT~*XB91$bI_p{
zQK#Pay?Rk^TkM3?T!l^OGc89i#JGsApdzY-{z=c#6>>=z%#*==^E{gR4xxWHE`v76
zlM11~uI6aUDxnFTWoO&=(=SL^1vbHcV)7SlOZuX3O}^;m7ki&BI^A9D#Jxcn`B@nE
zF-Vd)!EBiHg9FTBOosg|>;-t}htV(zti9rS>qak3)3E;lv*SU4VIL#^I38x$^9Q^5
zZn(d7x1YTqbm)7_qa^I7VJF@CDCp34_r@!*)HA+oTrH1^i^i4m>1sH${4#Yl{Cmdr
Q?P@qUsm9{M_`mq{F945~ApigX

diff --git a/src/wasm/Hacl_Hash_Blake2s_Simd128.wasm b/src/wasm/Hacl_Hash_Blake2s_Simd128.wasm
new file mode 100644
index 0000000000000000000000000000000000000000..6d5cdb7834f987142bb56a6a2aec4f62a473dfd3
GIT binary patch
literal 5638
zcmd5=OK%(36~03bpF>KcM`G2gt#hq7k)2qHWk-=zRnf?bV%L^jRkoZ*8)`(2B$C5N
zhQk&PV$sPW1quX60(6rC-L_q5S9BM^`3>!+KcI_tQ)E%JKvAIIofj`z+I8KH1xdW;
z%$ak}J>Pc@?+i1WRfRF8eD=}(`|Oz=<w!ZgzgWAyqqLPr{EN5S;(7ufN8!TLBt4B#
zbyks;P~oS`ao`CdLv>^o`^(43N=scg9kXiL@v2pgF<rZZq_<j)M%{5orFCJ~a_hD=
zdzmrWWW1kAp<xy))*e%2%Pwc8T{@^0;4CJa49m`{rft`YEG`>#`S26EG{aQcM3;TI
zj@e)d`Enl*7iU>gHaR7GC>~nHN@2IPzi&B(X6XZqrDWR?*%k%-z;fy=9hNk^XS!yg
z>6(rkZ?pKP_fhX$CadN01B3d|@W^QLr$VK)nek0hOZ`^hk$L{AS+omR&E{cY#WpL}
zOtY|6s_tE!nJcs!aA*~(E!!<I?d*Rww9Tf=a`bS!pvHp}4>xhFrsXpI5l;N9*w7<P
z_DTm9QW-w|@HqR9WwDVL9&T_bD)N((dswJgciCvhuPqRXE7>o9DJuI5(PgpNQL=5Y
zDaLrl(0P`!dg`Pn8@i#`hH864S&tcvr+AuDl%9pLMw-E}psb-(cs9O_1-q^;d)A~u
zQtdKn@Y^x!@{m&!iUuz{H^~fE))R~}T?X2h0$(+D6mKh1jt|%|&l(TB45CyYFp(>M
z#YX-k*Bs>xwt!qUk*kJWk$aSDPN<P<?gEpHx>5;p&G9_N`2gklAjP;2Kf2^+5Ry{y
zTn$9az^AyvVJuZJ@TnSXO>Gk+#ZV|qc|DWx1Qb>X+DXb|vxcgtd&~1r(F-V#it;D{
zguuYxA0@7ZSj5Ht%5Y*If5>yBLd4pyR~b7}C}uFqiD)N1Q;zEJG|v!10z5xam<Z<`
zwf1uXPds*%MSQRBj|EG|0djdFSUMggSVYlOQG8K<Kr{e|@?0mC4*{kj2__v<0MlT!
z3}6yhIE<xj1~KBqP`zQVSPBjM#fp&Wsm$L6GH9$+*jU6+@*0{;d^Y^vS@Rm^ePAy-
zC_!0t0?N-KJxmj%X+Q|%pcusgT}}rfLNQOOp6g}BpG0h+BATXy$}wUB<?BAvd}n(0
zBnkHU1XMk@8YT36!uadIe)qR~S^@zDFWIrsik6lOqx~!xAK@+&NSG%nDVh?D^`Ow>
zJVhzL`I4fzNuZGwi3Ecp1xYktDnP(DLZCQT#f~HX5oV8}@(hN>7m7D65?xl>AA5=p
zMu=<-W{i2Hm||(<BILMjr0p_h4yIWeayq!~d;fK;pZBjnM_EVxW)lsU!e0h|IF}sp
z9{E%NdkPu&5&R6!F&#}6J6>s1=H9m2HsT4Ohk5q#3frX23fl%7Jk)*duQV_w1_fm+
z>jF8SI8y!|VWj=#1%U!tL_;4${s$!gz83z`Bs3UyjZO+5A{{J`@e$xOBH=U)Dd?J^
zXz61H7)Cu6#|=Dzq9-UB;Re++G=N=y$e;E2Xej;$Ji#B;oqPBR*pDIhn30YK7<6SC
z!+?&kf)wy&45N*Y^@&R-C_l^6_;~g;CMNH<=mf_X8y|=Fabqm<F1~zdyiB+jR1Q+$
zeO!1S@8cb+Z~I+>#tkMoKSihDqr<M#p4?&Igd_`eh$upbBy@Z&(BZ_R{v_hZ_xKq)
z9Th=*Wlwg=Du2#U_*uc2NqUaX&;*^;HU4~%Y0%dE1)vcH1|2)R3-ky*g&8qbfmMy4
zqjUJQ`FW_$SNMxy$x9eW=R?|D=*ofd=`LC2FT+&$d4<l?G`);nzeKMft&23lXXpaI
zL^J#{y~tk$Ft19Ky9_Cy+@)w4QEt{#aoo_4a<4rG)#h;PJYvpERD-Te^JQrVkODHy
z3o^|2iAQrZA*eQot%2u0-4<w$e+R~kh_Kj8x5YByT1dCWFv6mUu-GR8RNoCj^u_^S
z0t&AKg{2UMWsC=dj4lGMcyfn;S0q^waFtdg9;`|<@U>vU8+3)<2qUb~>-<ex<5y{k
zU&F4hNmyNl6kzpcw2ZL&o~Potp&wS)X_b~R9&S7aX6taVf!G@oX3&*sqf9GNXFv)t
z+YlAq=o6RL>59N?oo@Je-K2H?eHd=S`(`g*n`Oea5U<V9`=;=|*~dFnKL8$EwAF>j
zEl=*?aa)oF9^14X;jt~@;cJ1%9l8?@gl)Qmf$$dHinc8V!rNW4%HP32co$CTZF+~^
zqTBQ?I`4-em;CQ}{v&kuQbn2xLXzTlt|Eu`JY#y#9}Wd7z`M<NeC<w!o3zY#X_<CH
z-W0o1VtlVlR=EXJF(3A6hxW*#ZQ7-MWO+ch`5_f~i4J+0OkM#p6^V0YNCD?c(K6zk
z?Ws6!=*PM0V{om8TkD8fm$(L9nd)U}2ap1;)dd^sed1A#ZVRr}ur;rg!H8&3jlU0j
z2SJ=($~k4iwUBa77{n1loIXLIY6b&d<F3Ip-lCQ`dP&>YJ`qPYJTfG4Vu=T*lq87i
zozbLkE<EK$vBkM5BgulOcuE9h{<$S95yjVnBpPLS4#&0pBs0E>dj~3<ytvB6nKFUH
zf%fO%xRwsm_}`Cfcwp^0zGV^Z_iDf6+i!wsIA1Z*CU`pK8m#}2@T_}Iw%R{FL7fyf
z$+T~h;t6{?e6xVJ3e&+`g<ZUDsVq>jUTu_Y(=F9&WI0aVA-Cn!tUYq;<g{vTscO-_
zS+ZM>m0j~cr_NMM&1R`~K<?d!MWq_q=G}VBrK;Ijpc|##>1#E2+bX(sN35JutywBI
zr+;h}5&q0M=4G_yI+j_5ldjzS7ZguhR^B_2r!6V}9m~^}m3Xc5ggM>1xq5SfO4WvK
jRjry}Q>|s%gtu&U=dO^<1MkA48PxUK=m|6V|LFa1EL{VM

literal 0
HcmV?d00001

diff --git a/src/wasm/Hacl_Hash_MD5.wasm b/src/wasm/Hacl_Hash_MD5.wasm
index c6c7b045a3db6d4a13da0847a3fc92289b2d17ca..0efb5420242247c42c88e32497204c5fdc092aad 100644
GIT binary patch
delta 530
zcmX?BdA(wSm}os?eF7tbsAH-J@+antPIQ<saV;-bVoqjyUc8wJ1LI^tMg>Nui3{af
zjExwWCmzw99Kv|MzMGkweR@+v0|SqgM`ChLyhmbjM!c_!X?$rxN@7WBd~Rt@NhSlA
zBv#3s#NrYLZZwS<AQSTQQj0V4OBi@WP&MWz=H%ojGw=#Sh3lcV6{QxZmN4+4sfM_L
zfgepcB{Mx0Xt970s@Z8psi_QtXoAW41(gg!Xo4UoG6;(UwX;Edyt$v*T#b=&^Ks3I
zjEszvBefkknH&!=<SB9~Fil>ht<1xy$g9Alz>}@Wr@%P*qP909!(>^VY(}QZojP$q
z@`X+UP@AW&5hK&)8eL1E8JoB1-2qXP4bFh51fwDlb=mkVh+1f>527xb{bvMfzG$%?
zNH$t-02*OrWe8-aSZ&q&$&jVOsLbHl#h9hS#LYda85l>549X1L-OM0w>jjq-r6%TP
Q=B4WaV@q$dpLGQz0L>zz$p8QV

delta 652
zcmca!ajbHJm}nhSeSHFBJtGP+F<*3|!-UD^jIxZ36BkNOyvxbRH1W9tBlE<C@=V4?
zlY<#A)K6mOX8+&R(7?c>=#iM56Yr5&oDuKqVj7>5nx2?k8DCnEl30=&pIe$!lF7iO
zfKN?MVsQxrw<K0289-whc$C3%!6ikhiMg41=|Fqx;T9zW1%Vc2<}vWfVJL&E%FN3w
zVc>%}1KAw70*HGV`0*%70~!K!jR44+`kZ*I#-tUcrZNcPaY%B0K_!C_9tB_@3(Fuv
z!#Tew6&$kh@#(20@$nf33?iJHEtu8R7@0Q5YffZjWSaa@%Yl>8@c=`fB9{W=WGiiD
z9tK5T1x5v)Y(+i=hRK=Q-atiLwX+$SCoAg20m%lP1fa-69V15O%_6#%Kr=S`>D>WQ
z+6HGp)DOcV5S49w7DQQ^>Vv3Ev;T~Y%#$-M)&ogN%MCyyPFWfP*?%mznl><IsW2)t
pI8Fr8Ox)Zvn}LagkwKY(dlECqGkWmw(gP+FJ!mr7eBY{q5de%J({ca+

diff --git a/src/wasm/Hacl_Hash_SHA1.wasm b/src/wasm/Hacl_Hash_SHA1.wasm
index b345c0a6f97ec37aa1ff1b4c5e11524688ef9084..35f56707f2f2bab82676b8986580904b32cf10de 100644
GIT binary patch
delta 350
zcmcbU_9b-#KT~}_BR6|XQ$qs-x0FX>a!$NQVsS=%u!o~zd}%>SVo7RzPGWHh1E(~a
zWCloCeqL&EMt%tcmnfQSZemVOeli2M2%30NYH?}_0}qn&dL*kDc#$N);whQwsX*KL
zgwf1ND@sje;71ot&M&BB5I`3Nxs*Xr0?mZ@`1I70`1p*)dCZ)gOpXT_@)Wrgm?oF=
zDDyBX@+vSX@MJ6UDKKtc#?#Eq$go*Upi@cx7ekf`BRBW7Mqp?$GAJ`}_cMY_)e9~u
TN=?ko%uCk;Ia+V>L!)W{q9|>T

delta 442
zcmey8dM9lIKU4iQMsD_}O$`kU+=?EF$vN>JiNzW5!5)r=@j0pKiOH4mr3ER8C8_Z_
ziNz%hoKo1;WB@fWaH)VLgG-816LT~3(osxF1_}aA$;@NmmPb}rkESj&FSCS!2jUd$
zCPCc8z>7~!8qgS^i}+BiLbEBYC^eOVKNz2q<otq41_693z#bQrMTClTeo^Y?|4e+G
zoQ#eK81fXk6c{Iq@hI~!DDo;WD)3}0@+mNEcI9biW@OrYlD|_)uaPlJg^`<kZX+-X
c7#WlqxTi6KOw)sVUJn!*deF$2TxnDd0Qdit7XSbN

diff --git a/src/wasm/Hacl_Hash_SHA2.wasm b/src/wasm/Hacl_Hash_SHA2.wasm
index a33351b7088b3fdd5819d6e338044feb84cfaf6c..09296bccb1535738a9002e66ee7d83a4b77c435f 100644
GIT binary patch
delta 1353
zcmaKs%Pz!F7{^a*L^BBHlDW8@x--V5Gnm-1>e@U&Q=?Q2COQ&qI+>lFq=}UmASRJm
zh@BVUVLXBJot{ziDck<e|8l;5ZSz%ne370uyWHk=qe14TbhA=5bknhovc9Dmj%{jd
z>&AI)&#YU<$!_(ua^#Sx`tPx->C{PY*cq+Zw&G{{gSZIfdGB2rEkw7rs&(iF109#q
zNX8A36-u3Pu?(txY*yKY40b~jPa#%0tzB}GNF#D)IaZycWMJs}^Advs$5a4)ws&}7
zIleHB)Ar9Si(6(evEf~SUl#kd+iOKKoOZ)YGB$#B?T(C%V%F}+*cfi&cVuK76V}mX
zB^jdFP?ThEkf>P3M+{^le8hkzv7u-WZ2AXt`MQew0B3L-uMcDv6Fy=fb3Y8ZpJ>k(
ziOh$01hl|pZpUXMN&?JbPtg(BB5uMv0?A{-I|5n4eui<t*3q?L{LA6chW%OjgMsPl
z9}I?~jiS}^1CLl9-^&8$(cq`?e5GtobW5s2iTAqjF)eSsk(xS0<(3+wGF52bU^Gkp
zr*W#N37Vkt^JA{k^DVmd9tx!}Vc&0IPw^rygzr1IUREzjvUgX`&&#b1LTGH8Y!HRU
j)VRnV0E>oHW4Vf&q)C>2ua8H+Ek6F25B-*ZmT#0VAVZPD

delta 1554
zcmai!Pfrs;7{+I;gbiY(|CC~d8AzoR0XqxQa4>;`#R~`f0Vd13r5nn!Y1c#!WE+nr
zCMKKk2|S81>A`p~@e^?I;&<>%I5Rsmz`iZVo#%Pp_x;VE{rUrc_=&%7rKbOW?DbH&
zyldHQ-q_t{ykT2yt7Arcslt9KvCMe4WwDJH{GI>S3LM_Hx&gX3%*WQR`R)U4ex(Y|
zkoZQGU5ig8<7ynUHfZDRw~y_^E=ng$Mr(H;hBO8WI*xVZdIvGFExrVb>u~{wvPIBy
z?4Kw(2g<JJ21?G)LTSBOEEM%0fsUrA&~2#_(_n{Y$8nTg0OgjY(=($GL&?b2(POYi
z3?bwqIN~LQFgyVbmkh!%3pQwm3PR{}pdLdAnT&!E4B}=W3YM_tIZ1gQl0$LG;-I9y
z03v8cX()jP%_t2|p$=S~lav<$Ib>WhO1*pwqqx;*38=wh1}Rs-5jBG}EZ;DM@rBnA
z((w%Jpdh5Y0?LX(%BwNM>my(PJnD|;bX=Qre&7VWCGXnW6;G)CFgH!&Q9Qq5f;_TW
z^%jrZR2IMcVFS@aF_C&j<`855%lOv(7@`Kb6TM_-(MhFN!rQ-*PtydS(+t7HAd^!Q
zGbCQk5`*SQj^L<j=KKFDd*g%f1r(f21p8I<eO8CQy}H0SI<2oP;qy&|NM;9ZB7<aT
nR`-1%<2~$)o(9d6Ji+}x^|^^b{4DzPrZ&1nI$5IF_-y<K*XQFR

diff --git a/src/wasm/Hacl_Hash_SHA3.wasm b/src/wasm/Hacl_Hash_SHA3.wasm
index 3243ec79e02b4b850f6427f46581ab175a85b6db..dbf593949bb85bc51ad6995a3c918cec53066f0a 100644
GIT binary patch
delta 511
zcmX@z$vC%@al;HIrq3;tXEAx!i+Lm_=fry?7H7l<dpH`$r>B<0C+4IxaEqcz<R<3i
z<R>%m2&0Lo6{V&!@S=+*=ND8m@QI+QElMp;En(nCGJ5h#ChmF$0VENyD^fDkQ;SO&
zgwS1ATv(c#T9wKmEP-Zn63}hg@j0n^3?kxaQW+q>fn-F{WipH7i!&0lc~cq0WZ;@6
zJF<w@Be;-I0Vzv0GBu0O0EQNW1iC>$Y2$b!BNGNm>@uci3{qHSj4ezUq%mcLfX0~`
v8ZpQq+%fqai?j_Qni7+W^NW(=GxPFNix}iEOn?NF0fPb}U^lC<F1H5&+wY~x

delta 597
zcmbQ+$#}Yxal;HIriHDOXEAx!%X%ax=fnq>6s0ESX6B{Gd#5HRCuYZ|r<TMg=A<)l
z%V3w!P0Y#3PiEke!Y-Xwl$y%Gi$gLwzo3$VPa3SV9>eCM)Z)|<27Vm&mKLNWmZUNW
z;E+zs%*!m!U=YILtK!1a)YPg}24Q)av+Gg4odon$c6?519)pM+ObM!rjKty$umVvW
z3Nnl1i!&0lQyIj>Atrb@8bd@3jV$6bfT7JGj*zK`NEw-$L8K&vP&66G8yT4}NMZ<@
znlVVB3mRLPFi4{dni?80$Up+iGq(U3(O}n<0K+i8AhjsBbn*rkE^CBLVp4H_QBr(n
bUS4VugPa)HEeKgy@F|GGB{qL!S#A#i^L5VT

diff --git a/src/wasm/Hacl_Impl_Blake2_Constants.wasm b/src/wasm/Hacl_Impl_Blake2_Constants.wasm
index 2c0f1579533e41b25cc28dd843b2ea1d097c260e..8bf185786ff4f80282f9b5fab198c566c1389795 100644
GIT binary patch
delta 109
zcmeC+dCR@wC1b*`hK2?P7A23w<eYeq#Nv#2r<}y>RHNef`1thHlKA4x^xVXd#H5^5
Z22Mp>iZaW<D&m6~xG>agPGMTY3;@5YCv5-#

delta 120
zcmaFM-NCcrC8NUB#)bw47HyBj<eYfV+=85Vr<}y>RHJz3{Ji3l#JrM8Od{@DIAx3D
d<Kxp)OX4%jLK2g5QsaXexNs}p?8&r*831BOE35zj

diff --git a/src/wasm/Hacl_K256_ECDSA.wasm b/src/wasm/Hacl_K256_ECDSA.wasm
index d1f56f3a9f5d59e69b496ed0a8abcbafe74f34f6..c0e66ff8b7bcf3c7388a31c31c4dae96b1574d3f 100644
GIT binary patch
delta 84
zcmbREpS9;d>jq9H#%-IqnEY6{#61#|bK*S`i!++j*|w*%G4Aza<xyZ#VBD@5%9z8;
e%B{etz_5K*660bvsJK%mqZBh#JTZsSKo|fA1Qg={

delta 89
zcmeDA&pPox>jq9H#@(B_nEY7yWjqp-bK-+bic%AEGxO4$li0Q=u`%xTV`WlcRN&dJ
j63Upv%gU(0puoL-Y7*mOHmJC5CZiNHR6Hh!(LfjgwdfZ5

diff --git a/src/wasm/Hacl_MAC_Poly1305.wasm b/src/wasm/Hacl_MAC_Poly1305.wasm
new file mode 100644
index 0000000000000000000000000000000000000000..e72930c86ee20895edb23cdd78e033307206b896
GIT binary patch
literal 9539
zcmbVScYIvOaejMfS5O`Z5;znR->?EKAOR8pNwGapVy_}aN|Z$K2plPN2n1!3JOGja
zQk0A2qS#Jx61O;s4LjAbQ=PcTPAuDrn-bfyWy?+f<gfgd<fnYI@7>)40mb_HCy_I=
zyED6QXLk0RJvyl?L&k9&^Xd~%JmFMcFq3BTf|)W?;lCy$Q*&hPE|{8{Ppl#b=lgxZ
zwii-^J(1U*;!KH>!>O^<P<pUnC_NN$+{9Tf<kZ#C(UGz7#UZ}?VtRaJFx|1mao%u9
zYOwF}a5smT@Q_7eD?_Ql!I2&(5+1O?G@IL;g7A>~u#N0Wx<^wz1L<C;Fg(%?j~pLM
zjXFi)?gi#*?Qn|2Lzadu^gNR88R))v_0pyESof8_>uIMXJhm!4mK%6IJvQP*vs)VP
zO^v6zuZ*Y0#tWvLf@jpT(O5iDT2@}+7C)_`nR2`*O`_z*C$&!wq<RLsk9vE%PmBy+
zYu&u5y?ZpvT<RO{yYh$=PtG3`Y-*xre%Dy~N_yNWT|nvTC|8$u$`<JA?Yo@5GVYXD
z&$oAJES+{Lmdx)MO7%Ey%>Q6+Q-O)U{CWMP7oO+evB+fUR28H5)MPxMp3~$wQUb>-
z^30$Y9qgAV9Erhr?y$s_j7q|Dq_kiUN)bO;xW}p>f>O^NR2@T|GOtL=QSO%1+1{)%
zEA+dD`bAQVA{2LUwhB~qIh~G!a?cD%ISrS4&f%=#3QAlCDw>_ppg9&wO_50?P$4eL
zI7%u}DT`3%MT6yNTNSEgF&4=ZES734>3aS3*Bhs$21~rU+1r7Xq_k!BHmWH*(_ZQ9
ztr@9BwO5`SQAeq1_O{fcMjDWmrKpu<sFOz23*e$bKf}e+PA5vTtkZEx8atgN2|68g
zF}R{45Lphl*@3KJ#|kW;VIQKh64C5d^~)-()P=8B5|uSdTxn90lx8K6wOFNn>y)^%
zUP)3mD1o%-1deP}5|vF#TxnI3lr|-h&4}rcElQ%&uEdoNB}wU20@;c!*qD)RXve0E
zY)1!LGjbO?(Uy@N*ow^=*@@NIHYvNXM#*k8DcOT&B_7r)>B26wVEbf7_F^}OD|a{c
zP(!(U;897redwZ|a{I9t8*mr71Gt;I${oZ#)K%^f_EA^4!`M$<<&NM0)?){`qc})i
z<&NPHb(K4g!_-yo1ddQwxsy1Gb=XPnUL2#Ya;I>dy2_o#3F<0$A5KzNxih#|&f=8Z
zkJIu1?vrykBM;i(k%w?rBS{{{IZMyuEFPMcZk$tc0r%tKjHK`&&S&HzQg%!aE?U})
zUN16xOVa2?IwO~mQk~1VsN@kmfS!!>p&OSn(vLo?GJt+d2hpd4hS0BM7#A>*kr7<R
zU`9qUV#hp+5$znqBX~3;S1>H&7?rCSlE*M6Z^0FL95IY*=*DnWLpO%UG<0Kli-v9t
zk860AdZu4Wz3|Zu4qif!>l9N!Jd^^iF$G-5^*5ISZs3NW0<LQcxXu)CJ(L1&P|_4|
zgVCz1^am}pk{gooT$#W`Zd%vR0SjlINlbdqc3Pa!X(u@CL}=PcZRWI-8=TOfg{Ga9
zDO{5$45s8sgM>U~XzD4Zxm*@wGi5Vi4RT;k2y9!KdJ9>f(nC6JFl}UpQ_M+gHw|tY
zxn(e8<Y|LjMs6ECZGwdMjPe=-f&5l0Yl^$AL$W#UZH96v%e~#on&;kO@QjgX4c=<x
zIfJ(ud8fhKjl9d?9Y)@5kioM?U6R3bq*2m$lDeetB2ALMn-pE#6}r%?8)cZk$KX9i
zo>yhRaag^pANd=1w1ExJ8zx5wQF*TkXUF#$yw|AC`+X)Vzv(ls{Fcuo<+ptX@;e6a
z<KU=#z-L_fU7tzH@A(Ym_c^!P`=F&!`H-cqeAv>Ye8f`7M~!AiNB+RlsC>*)SN_n_
zr2LVkkUuuykPn)SeB6M8K1A*l1|0Tba<c{;_z`lSG~m#WlKYeaKj!0RQvSq%U!&Zg
z8t`+J`!fT6k8*!*zz<UHFAO-8EB}OHcKS;L4yOq#ecFI3G-IW|GN2kwS?MzdRHZp9
zeb#`g+=yAV^4A7brA;gSjR93@(@KA9Kvmkb(%%_Sl{T&P_XbquzJ5}z{DT2iY12xd
zGoUJMTInARs7jkw`X>Xb(x#O@Z$MSL=~HUu3kFoBO)GuTfU2}<r7sy!l{T&Pf&o=&
z(@I}9_=1tI7<|#lKO20>$iEo8VC1U?UpDfhe>~)C24AtKL%weCRiFEYw&81LTK-kr
zEcb5)H1c(N=$i&F8hnG?w+z1NYkb?_TR!(4gYOud$iJ%`zRPhh={RfU-*uej{=<OF
z`8IpMXTWJ*BKLiR@A+1KVDNpPds*A8#t*g4az8TQa(+OKR}5(KWpY0@c*WQFiE?(_
zs|K9rC+z*H!H<mm%;3jHes1tXBfl_s)yOXmern`3Mk{{CBdWRo=cJnae?h9*|Cgki
z{$C@7=M;JW`M;n3uZaXo=47eFX9<~|g2-gi)ELug6VFBoB7}{^iU9y4evCG=J+34w
z2_>$SDoIM25=c4XT~5Y=K_8pBsIb>pB@(F25YR0OvRD-s*^b2opo=p^bXhb<TxUCK
zP==a}B#{g>YLT=Zbtp$&hUl$6=x9K_?N|zewyq+E+%n?CMpO%k>;w(OY0Gq3E3hI^
zS&0=^WfdB+Dnod-I_Ow~)wZJvKvPDV(HwNFMYHW#2f?}wVcPnjV*}RPjutFOONLNw
zW6-e)8*N7`R-)A|s|{-i*VYoNZ6HwFBpr67bawqWaV$|<2Rd)cwyr5`yV*sjZ^t%l
zpCM4Y%iGN~z5_eD%mk5IyAIpMVY}?G-Na?PX9(8z*kK-a!sEbc0=6wWaxX{jwIlB)
z9=m&nQ0*Q&Qi0k&J90lZ>&OEfdBBc5NE~)>h7j$L9jO5AupM~>ZG>i93CfP**i9lb
zk_p1G6Dpk~?m9U`Aa)e@-Xsb;WtC6kNY@lCpQeGEH>Zij?nCD^Vc31xI!z?jhO-j{
zW1Deif_Q9;l5_k<k_WjXBoE<!BC-eMJkAk{JxCn(kfadB!y52WoY#1dqFci~iVGU;
zQKT#u*Kob)hii`?uDyP^ru}fego{LIJ%ney#Aay%vr95S-1P_@cWZ_yZHrb9gl8-x
zZWFAPYwG}u2euAjkWg)iIL+Q9qeNqmA~xq<A0slmf=7wet`MmWV?0B!HiD}e;<d;4
z-}p`aYl3v<rroN?@fJLuk!yI2sOy^C0AZ4Tq?0p?8<aig&eQw;+dRERk;&Sr=w#tk
zWpILv2&0NotS~A##BPMgnkRodjTS3hDkjD$4ndp}Z6;hQ2_TI>h^eAL3Q>T<4#x@b
zVu?h(TIPh9ny=Ly5gnxYxMl!db4<<0Xg(G)AJ-kC`8Yk3AGDD9I1@~QNEJ*iWjY1?
zG{h-bllos!rc;!0in7oY<&^Xb%7ZWP2Q4&3IrEgma<f7zIn5&4SroEUNhyGOZjKFX
zYL9U6{9UP#Dw{n$;}z(Rgdl5v2U){1?Qttfq8&~l7JJUDM)6`)bufw-%aW`<Kew9m
z)NpY%-r`)L&bEHk^fOgDUeZ?Ul!C?8D8#Q>XmQodrll-~OY`+<9e1D}wZ6+NiTaZ3
zlcZ08dOb@G%%w}2ZNoMBGAxruO=Pcsj(;iyUH_Bg1xtZxrr$=$G}CXLtVCUY@>LY#
ze&qBs2iJKOSz!&9OA}Tw|275LbS;`Rn+DQ4N{WCrJ3B#<zn&R7U*vC~q~N!O`F11L
z$tJ94;k<!Wa|?6uN^I2dTZv7&?aa@bJXWGjlgCPI*5tw3T&rplt*S}1swUB@nnbJW
zRoKG3u7*~j-EvWK9hP&+by_Y-ZmZ?M3Y4lg^8FV9o!xS-F2Hh0U4!MI%UGii0an9u
zQPr@V)v%n^u$<LMqG*ou6Cb$6<WL;p^N^n$N~Zh^zL>tZ*&qm^CVpa6vqMfU<oOQE
zdim?YjOTq=3d0kc@B_$_s4uxbN%{o-qYjTQ58d2H9loweWhj6ag#xGw6<9RQ^HPbb
zX@Z1OsSZbP4TW3)F}CvoRLetDhb4^dx*%j47+?7ST1rU+NHIh<fR-_`^8v)yAR9nn
z%rlM^Lok9H8NuMIPBldRUXA7IkZP<@|5an9I<Fe5)N|DuK&zGL!w-o*{E+Cw4~ahf
z)DU<00aSxEmW#5j$#O2aX3HhXt+gCL>p?hJ&W3~KY&clXhJ)p7IMkvh(6F4<u$<Mf
zoYk<L)i8XEipqKqvVm!5LntS%rxfJGbvZUjJK7^0EQL~pLKNk1im0@_QBKT{Rp5lU
z=ERMNX-?c2<iw3=(VVzZHqmw~7t|WcgS-m+(b^C8W%FQbHV?LH9&BA;L7v`y;dLfH
z^DIw@8{ySy?4Ky9{%HuP^2y-Tl<?FLZ{#La_r?&JwGmu_*u<H_DX*Z27o9u%RF`uS
zu`cJ#qil17wvQ!viXxti_`p#<xHx*z<*X_2E}bJ6>ha3;>ulvAS<kFKt_E|J=jXC*
zoQuz?$U*o@p4LUE<U?vi7IQCo@#J1Nswf4{U6f;e=W@6B<f;y)Lg>}khXQP>CMJ)}
zX7t+O7i2FlE6jT-CZhK*za+}k(F|`rLrGN7&$LCHFMM&9dakejPG8se^Wf%uU7z)J
zz52R-;k7mR6;Bbfmb1@p_);v*+0YGNHb*X$#*nNVj*zSy&W7xUBfsIcjT^ok%Q=xh
z*D|`f(T&N9U=2a>#Uv7$&6;57<O0NRrIA&LVAX&=*RwX^gB&wR1ZxJEK@8J}!5SY8
z^)rbq<9*P~Io2^bt><mWf`qqTlWgSv@et+w+d?Vu?}i+k^RHGb^e+__@-L6kuk>ph
zk^C|}z-?$$zqSDy6YASGi0|9YEFZRTigvE8{Z8Mu_k(@ebKIWwZM*umec^R=WcO81
zlXP-(wsLbivzxPxr)xX5=Zvbe@0ueQvaHJYtFw28WOeo~?96&<XWmh^jegpV9oU_n
zZVwA14|`a6b;@3T;oi{hcToy<-^;PN-DhQ{yHACMcK;rhbe%k1460x0h5anm?ou!8
z#}4(ve(X~Z?8h$O0|&5Q4sx19T;HKPJ#eTW?8|!KP}T#7)B}eWUfW@A@ev%!Y3UXp
zog){rSj_k979S7Ey2U4OJiEom^IL4&xWy-NGIySi@;n`5tlyjUyZ@NMS~G&qf&7tD
zA#W+yM{B3;NSwB3i>0WZtuubxnE_U#+gOTj!<np0A7Cwe2KRHyb2!F=^Z{0*53(9P
zE9bd^=R*#Dm{Q>2hjMJr!QHk=sIZWOFT4rIrjWu>b!-a9)UhcR+xElk2}|LG?_d5b
zAw8U>m&@zD)4#p_U|-h1y;=YEs(*VIURpXxSqZu1ISIMU>X?Wy^bv_yW_WoPMkb4S
zNmjifv~>PoXsJ;Vp93{r3p0ajjU{FF3gxd8vro~FrHDzHSF6Qf?q`Ca3bmW*Uppau
z@oWA-nYR$sz4`+s0V|?nFH$by4K7lxbAyZ2e;HT6n7c&h+2t><ql|NcSm$s67qltC
zo41T_WJ~l{2masByS9u!c~*K3YTQ^Ik#Q-i$#5aOS>N1V)MhKbq-LEYLnRrCWPNDU
z#n)z$rX575&s%dXF?OY5Y-!;1OKFD}Ywp5npu`iQrs{KSU{ia9gXi1ij|`}D<99-|
zm-H9?NMtfwyc99-@NFX(&6-E_@gf`DO->;rE-dp0@D+*|-6H4C3hB9<G|uOf*qrB+
z(EEnXyYs`VSf@k@Pvx)s?vb|;gjewHfkeK<DnkLoywoqTEQ$J(>l4B$?E}iWxkQ5+
zq5Qc$h!<_Mzomyttg29nwHU5nTQOFrgW%#X|Ef*TF{aA$B~~qkT#fFRSJiwJ$UemC
z(?Gt&(sD{mtcFmD)rf{riN(^&mRR%4D<SNu(b|eZuC<kxMXar~CSq-+1<`8#V6AM3
zj7XwHMkFr55lK>OD=o3s5mx$zl|#-itQ2bcg_TRrFRT=kt_w;mTL^JK9a!#76ijtU
z24QVEt6@1?Vp-0XSeAn>Ji*j$^z=F?K*Eg^(`rc}`EW=iZtFi=G0qAn@ubmz6Vh^O
zd@P+B>Kne?lKU4do7-CSUyW=>&&bed-(YIIZ)6zhv9Xacj9(oaPWNJb1Y=i+$NPrT
zxRmM}ygHVS@AGNX^3%ufKE54&L!*P~q4e-LhEl^<Q-k2YwHX<^rc&y%?O5LI{2!US
B(JBA{

literal 0
HcmV?d00001

diff --git a/src/wasm/Hacl_NaCl.wasm b/src/wasm/Hacl_NaCl.wasm
index 1762f27f45e6c41e8de668d97407726f73a6970e..82891f579a9b220688c6f7b27399f23ab4db087d 100644
GIT binary patch
delta 278
zcmZ3iK1Y3mJTr5B{X{XjiI#jD{Ztqk-%c)O44Axtk&VUJh=FnPJ4SgHLo)`Z$t#%@
z8JU4xNv_14%=El?GZO}u$!8fA7+EI^GHEihO}1u|V`QIP%cMQ|6jP(DphseIPQ0(9
zb9_L4PNku-fvG4$WOF_<KQkl8<a#zSMwZE1oH3I>u_XZc{d}T~ESodg%NUs%6&NNz
z;xIr`R0d=(<OC|1yqz;fmPLU<flq)(%8@~Xfx(f{kue8^K%6WAmdyrSJdDg7imH<*
h@QDCT4B_@;g6rPNdlhEY<fVMqk=QYt-}7%_1_0R8MF9W+

delta 402
zcmbQEzF2*NyewlqV|@Z+9aBA!1T*0TbAA0r8#6}6FO!QI114)TaWXPaHf2&^WSV@I
zQJ%%vh=F->7?UO=%j5<oIYw5Xpb{h7<oQh6jO>#?Fe*+y#nh-G;E|Y|6CaSDQ)y^y
zU>a|1B#SN(U$8lYnV(sLUCNO`gMq>E0fQrB4hVsGSptlc``Px0!z2|@<u)g=moYLk
zC@@aG&0&DggqX?ooKkR81W-+xyn!<YXb8(>elAHy#?6{sJd6_TimDLjpxfxn?Z<@E
ZpiR72;c{piC(q-%jz=zL^9%kh%mA43SF8X4

diff --git a/src/wasm/Hacl_P256.wasm b/src/wasm/Hacl_P256.wasm
index 83a71ab29e05334d2a59c1cdd3a5fc530097d357..017ee9e8d9833675c9f3f114e625ccccd67c2c35 100644
GIT binary patch
delta 193
zcmeC}V*S_2x`CI8=~Me=ekOBfE^&{<<eYeq#Nv#}p{(-82yT3^hoez^22dj2*usQC
zfDf(^EQ3(kypeVLMpi}-9d>>NCIv<T{^=8q8QZqE88NajvVw)S_Zu_zF+!yUA=0Z%
m8SgLwg%}kWfI2o>!*oD}1Rz4<CX8$#Lx8HbueWB5;{*UkFEY&l

delta 179
zcmey@$=ch+x`CI8sj*`-Ka)8#zl=v>a!!13Nl|KIZf0Kk<N#KA46#Z~vF1gr+ZVAi
zdg!n+DKIMVZ+~yd*uw~AHyAVaLD|z6STkmCuQO%*$;8U2z@WfC9jGvSI=2m@?Dlda
YMixe>C{PPjl*5FP4Jx`Fs5y=k0RI{=d;kCd

diff --git a/src/wasm/Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305.wasm b/src/wasm/Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305.wasm
new file mode 100644
index 0000000000000000000000000000000000000000..bdfde53755a00c6d8c43b1ab7f7cea670bdde001
GIT binary patch
literal 1993
zcmc&!Pfyf99R0Q3w(J&kquz{h3KzUIvcSf0+Y6CsjERXJ%g~*|K-+1iQzIO<L=IlL
z`4QBMM?Z#F?;gGQ6`a{WW(oxjiI{BKPUg*fZ{P2=fQ!@s0OQ;7@iE-oHb%y1+o+66
zzl_YPwpQC#RjHfYIdiRTOcv&kx02j*pJ0wtl2|E8P3U^Br1pz4&lwkYwC8Y#2$qok
z8o-4RoY37Yln_IQ+|pT6oFps)QwKE2toFdtA$N6-nUgTb(E%AkOGkEfq~JIQTi<Tb
zTIqwML+<Ge(H@Bo!ksdX2@eZ;M8MUtw{@%x@Q82*bF)Z0Lo9Gu2+W05f%Q%e(`k3S
z`Gp(Kcwtm9ICVYuL`}l&J;qTIZl!q=K4;0{%Id@QaFdc`xY~R3EbOiK!#FLIkgtUq
z<1$F)rhWOLmENQ2p`LcAbAPP#TSQUkGew$F0iJ&eB9@S159ZIXD6Kln!WottV#;!(
zJqbzv8O!K=RzWx)RZylmYE=VR2JjtZ`0f%6V83evat7eHd>gfp-IrzjbOHE-@pot#
zQ-3v^&|jw9_SA@rOn(j;gYLjc<Zs_^OIP2m@ljXWvZYlUInA~vLO)wq7CLd7guM5%
zN?^bec&DzKTA&&f%!V7(8Wfys&<2WFn$rXe$}&VaXB>%=XJm*3L%hrcO$mxIO-fEW
zo0I$UVoHmGX1hon<^<6UCHRn)0;M?LKu_sTu$76IBod6PCr-11Mn&+NM6zm&58zUp
zvL#cST4%4>rt~ngISW#~fI+R_*X&7)uG!P%jk=;M^=%7T{Kx5`j0qI8G8>)?NDc0P
R2GlgPnx}sskd&(h^b15TaaRBU

literal 0
HcmV?d00001

diff --git a/src/wasm/Hacl_Poly1305_32.wasm b/src/wasm/Hacl_Poly1305_32.wasm
deleted file mode 100644
index 65a9b66e57f25f15424b49f8aabf3c24245e385e..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 6511
zcmbVQXINa<5q%c~yk#j*fW!*5GeQE?J4mu@X>pS*S5>Q`6+#k-CP20&J$4lXvg{P6
zICgsC#7?uZQyn|iNnFyK)9v)`KlznkNxtOFyag72a*{9BXU>_uGxxQ*=gwR&)!*ej
z&vP%FKY!jUJM7Xfeb{AOCihP|FEh#3;rvWSfg|I8vp;2F3LQ9>_wt>bI9{|Z)tBmO
z>&)+J>&o+j_#R$&$LZeQp1y&q9Nu!IZJ?*Kt#OU#y{b#9v;BB?3te2UOGWN1U8&B_
zo>ng}*WnVES-r~3&vluZJIC0awDhK0PqrQPrsR4Cxt;@ksa~%jclZ+5TG{9o=DN(z
zouTzaTkFY|Bd3oYYwK(2Z$I1S73KO?=KAslo^9*v@%*t@T6{D$kZS24Nc9cmXT1Ci
z`XLKq#qpBTLgOyuCHVoKd<}muoU<j>+Szh*Pv@DHt5>XTS-q;IcZ|1pw-0zRd3A@=
zy}VM}O0O9I&Bu%X2bW{KQ2z<9WcI7i(v@oUN@LM;@onV0;wMJ+sV)rb-@J5T#!pYl
zOid@f2G5g1cv1u}S&(#{Nx!p0d^m}rsM%}sya@aVf(Q~30I`J(FGjJ%5li|S?m9d4
zw-S^{Y4gAM-_!iRT*{jNG0w{oN7;}BC`I{@OocBM@Fy-_Nuzk|PO4~>j~UYt$aGAV
zYE0Lu=MG5%(~~u$7qiw3T5CoxqMEihlq?y&Ff28wPL_>(%%s&YdQoN}A+s?<=AcIA
zVx|DIL{Nd*`oI;K)8zStxlLX`K$DjsNRtOt=)=uZ@TFEEkogJ;S)c%^LnY=l7o>Hf
zO4J(j$>tji$QBq&kkuIjsx;KzwJ~3>ZY<CnFqY7JFvhz9rlB$$Vay_oS%fi*Fs2bn
z4~0ojrbq#%prDcJaM#5s75q#k<!Q%6DolI{=7jLWFco4!QH|xw*q|95ftxg^W<r!R
zv7|dGvHZ=<3UAw#&E}0_&5_Oy6lZTea+)+li7RmNxaE`6H*IMIHz~EsP$uOlW&V^i
zd4lY>uRxHK36-=o6Q<^5LKUXwWWqFBnhDcU!F;M@CRND{Ok=)Gmzhk^YW;w5@Gbg;
z8TujOsL`(w$4tJ^W~zkGJetKkitC4;!+|-NJ*=Onh)Gp6_NqFVU32weM8RkF5CUco
zA;Ih+fY}2bt5XQ1ULhe33Xp|*0Z$ew__A0bkR=KUS*ie8247v4EBLZPA&`{{30b88
zS&ijb6v`T`z~WHWVkMS@(uh@98cGvZV_7KcAXt-@^_Zuy0ksO3VZOpfEKs-{>#-1P
z)1h2}4Rlww370WK*=B5Hkg_B$XPmNTT!98Ol3j^Sj8%3OHZxY))krc{*)?cptg>ry
zCF;>cwgp!)R@qiu%~)mE;Tpy&+lFfytL%DgK^@kS-GHr(Rdyq;W2~~9u#K_GZpQVD
zRdx$*knOlpcHk!2iJN5?Zjs%VO0oyrHIw94>@wVo?btIU`>;!4KX%~OP!3=>_J(p0
z2h8UX4jQ(gC7CyRK@Ot@heJu>fW{obL4{WA#Gz1*VjohWwBe|Q97CJoaU4~r6KGRt
z$9^0Or2|KBJd~5@FrQ9z=wKIG(HTlN+NB34r57jU6uP7j-O>*qJ({{cdNp-@oYK_w
z(Wj~FqyLgx@cez$f>>U<B&!9*lQiKKTZ1PtuE?O9tDVp*@g&zNmwE|`w~j6KJd`8@
z-3)X;kWy+0SM7h$kOJ;O1t_F0acP%F8d9bjQpSnPax|ozmTE{j(_J4ka^f9K%#hWP
za@}BbQB38IQGwD)8dAv>Pc@ZkT=mm2Evq5bn5M5eYb9u@hUi{1wwve<GqIacD@;SU
zt6XA72{23VFwjlKCvZ~<2;5W>1Qq2-Lj*!i2y~6sa-bITOhbH1L(Z<a0Q2=>>J)qm
z1R;<Hg#-nH01Cu{NFWF*5Cjznf(isLpaMbgWrczY1VIIYpaNmL`5FbE0zn8U5QGE;
zf&dD{8Y~Yf5Noj_q(C%cWk`W&!m5x0u@0+43Pc^&r>PG03Y)kD37fh82uUnbP(@gV
z4P1q)2zAC(5$cVpA~YCNMObJ|6=5kZV}vTgGGnR;b;eW?>W!%)G#FDwSb~j=P(@g3
zOci07F;#>*W2y-C#;6EbjLR88U0|#-ssm${Q6m_uj7q^+WxKJRVzGllu~YV97ZqZ+
z?2j~r%ib33LF_X;ggrPgBrRO}gu}QM2UQ*Rs~#Ld%6wXJ#PBGEK{bQ|fwoYNfhsa2
z$EgH_6WEK^kn+$H$wRw^Oz1<qI(1Pd2;Df0lOZMIxT-}jdd&9}dUUW4CvYmHUUbU<
zdgV0r<2Llk?dX>~5W|3`ZVab2bz`_qQ#Xd&HFaaSL$|n+r0bB9<RunfT$gJGc__}2
zfHRbUvpD;|Nx(UrizMKzO2Ao4z}XxLI7dq*;9OP$BBx8;$vFv=fed1B{IWqLNjcTa
zgl<S0EP^)hxd-*KgS_lu&Slg3GcTK7>g70H>auCc;EbGi$jECP;&P|sRClse8n1--
zGi`U$CZyxIk)7LY#$9UmjIQG$hao4!yu>6~8*vzMa=~HP$z2W?oLqFc%Vh=aZsnQ-
zS^FNdRpKtH%b3Q!*3r&Ux!0MkwbJVy?soD9hkKmd>+o79Z*+K_lQ%iM-pQLCLcGCg
z;)J-D=o8;a42W+cCWvn)qM2`npD<a|%QSzB!&{u(r@_&W-THO)A>aCn-@qU4a}-DJ
zZgRiNRmTS$?sppVwut!h_J{=Xj))}Woe_b&%i#e!`||FH1oEDUB;>sjfxM5`*7+VZ
z^yU4AfqcL)ArBcs9(JmXo_x^Imk${R@?pb-e8dp)Q3tv_=t6nKflluyd(?q$A0Qib
zpyNYik2%owVY0^^_%M&Sw0z8gPowPP4tyMCpK##wDEp)XA4u7!9O%kBf7DT(KJ7qv
zPGQy)4h-QmW_`wiVVugWCmk5d>CAe{fuVd6qdLoH9T>_v&H9`JLpi5epLbv==QQgJ
z4h-d-W_{6tp?t58=`3GzU?}G_>uCpua!#|p?7&dYY1UU97|J=#`l<s%`I#QqS)OrV
zDCacmSqFx4PP3kKU?}G_>v;!;a!#|p=J1S@uRA>J<QoppIr*l;^G?3y@HHpjj+Tdf
z$KmU?I^??!--_7x^f!FR4axWQH)B6=;3VJW7ka_r+Ya9&`=P@NQN)iNei*SIJN($0
zkpDy(enP*Ws-MmBGxam}a|hnek2w0G126MavX>lQjAr?T!%GqSrT%6SztZ1~{n~-I
z^9x4&#(`7*lI*t*zlkD#r_B6*@4(Caj-!8Y__dQiI{enjpB#SW<j)Sjck&m9KR9`r
z*@{1MiK_PhiKycLXQHbAUx+IIFB6gU3X*^Q=M#S$jI+_n-2~!K-F5!Fm!Fp|$n<4<
z$|B?mPZW|_@$F#!$c{wM4G2Du4hR8TJwifC6(D8WNrqMoMx8#pJ=?yfvdx+rvY@NT
zs#ql|Y@mt-XjRCHZmgPPU1tN;JZGp5+2AFz5i^jmff|&dCS<oaGdnN~Gi_itShTS>
z582dJvXz@I;8}sN*PDkqdRw(@?4#S7k6H^^fVo%@vOKHH4%DO01{we~gt8C|vjdB;
z&;}Mmv=Li^CE0<cSYiXq*wnBYTaM+~ffZP811s54t+d-(g?g52i`dL9Wudl08qHIh
zn*YvI5msr9Xd03A%^9p8Y34`Yz@}}(FblQIlA9>w8?mw34YE>Ot8Q1&?Fw_-#JX(L
zFpIU#=B6!Nl8!?x*w(1$mGr#QJg;IscGWPOx2w%l3$<&^^IA5N^xQ(vE#|qEb=cNn
zHf`6Lrxs}2%=3DzVrjOHZQ2dEafCe@VUP{l&1&7ky6cu<7GgJG`v|MB9TvP3*EeT4
z^G;4MGBU(UY!{k_ScdJwx*=9#tFUK~#n@`>9%MbXMqwYHk+7d{gm3`2vSZsThp>;O
z*nZYw2P6d_2Q}e+9MXLE(W2?@<FIDCkCYZWOi7P*8o_55M+n%(5fUuY2w;(RgcVvV
z%d?}b&Dz+v9b?mW94GkUE)1icjReo4I!Q`KFS3~{(_c@rdEl>I=wzwZ#X8M)l3rG0
zr(SWI*2l`MAE#KU^|Mmz#z4qotp}$=)@!%%-@u6e86*ym*sHn&x8sgb&fqpyU1#JR
zJe<{s^v25toTHuO>E7hkd+!KWFAoS~lMV>|<P0v_i9?_nT1IgL_ZmDeIs__|A{3&i
z(U$f9bqF*uL(U;koU#y~I7lf)K9!*)Dh>_&$O<AzL;#fO2V*D69+3TFRP<R*Va=ef
zmh8D?WzM-|6_<VG5EY~XRYNizr92p7>l!_42L+^b;#_hDjq!8I8lD@?q)JdfvP*V$
zq$F8u4lOOSX3-w2vgTq|PL0L$N9!a7vsGjCa7gAukA;MqQjK}~@v2d)8Cs3`nxWNL
zpcz_?I?d2(ec*ZpUm6qwN)92xu8;s88zoS0=Zzj&gE60Mp|OB$kul0778|4HX{e`{
zHfE=c#_W{Qn4K~jgWg6B64?l27GcaHj9G*+i-^M^FNoJLL-P?2N_fo3!j5Os;|Udy
NU(geI=hb>~@82qQ;#mLy

diff --git a/src/wasm/Hacl_Streaming_Blake2.wasm b/src/wasm/Hacl_Streaming_Blake2.wasm
deleted file mode 100644
index ff2f0e69c58752afc3e308c5bba2882e315a67b9..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 12128
zcmeHNTXS1Savp#n2?7Tscn3*|>Lw`(yqdb0x~x_bC|TCUl2(>1+Lmkxq#y|gAV`Ct
zH<s2U_u^!?QkAN$<SqM_hrC3u`<PPxz*cSLB|jl~%F|Y!QkAdgoH-bfhce5~=2EpP
zv2eO)rhBHlr%(3>sKUl-L@5>d+n?_2sG%p3t;p7s$aZA=Nz3-OeCX%MR&?7uHkiRv
z8>VjQ*^MW?PDuQNn8uSQP<Xxuhz|4wPkY9q|MCqs*xi1sP%Eq!D=n+V)u>7*m$3i)
zo9pXqwR)fRoL?^1*DA$@(@K5Ilv5{F#GcY?OM#M?#oC&R+B15ZW)v!wwH4K3PZ+T$
zn2qGu3oGT~Qx&sk9=2z$yeO`e^UIsho)v5Pjnd1aYPIKv9Ph+}YTI{8p;CHY&BFri
z_LQ`(v0f{ztN8Hs!b&B7V|BffU#t|$#gq9fYt@Z<p<3UV+J7jY&p$8L^QA8z6qYN+
z{C(At)|0OnHeUF0CN#s7xsgA0GQX_FZ{#=EpBC!H{OV?<UQ&rY>&CxjU8S&5SIPgD
zZ7(Wkk9vFC%Z+U>H*Y(&XPv$6uyuOdohE=2FhL;?GuP@Z+p6U+cM#o)u5@=#Z(skw
zV5aRagb~?Rx!**S?SJ?=h~x=vm;3cvv9MaIJ~!SnR)vA9Yx#2VtNaRPz`mub>Y6#|
zno_k?SLwNfPWN1>x(~eI85C^1P(26U(6d^xsCwUeW^Ls?XYPBe3VV${bN^e9U{CkY
zJaFIz{+S04ydiz&Oota5$N(*o#IJuZf$=v82NjKOO>gH^Rw-`Hy;0moZRyrwJmn%4
z{7<)VJGJN3d8Mc=7b$aF%Vj*MTd~V}5w#1u;!^vpvL@vew{VPFD7K)K6KRbklhn@f
zT$($mL*%DLd`!eAD3Mc`MI9o(1L8X@@d;2dh)-aHjY(nR6P$#2hn!~dDN1?b9TD$9
zykm(^ffDg4U%WRdOnizv^?`E{*?GL82{#SXSSRH!>Y}9Vvx_<xRE)YfZC17Hzp5KT
zda$#eT>OTyD|jyTxP<khO-$V5`Zn&#MO^M_((K!~o8k*9PFVc<j7n2C_hP?VGc4VQ
z75z}tpUVVlz;mhJrCzAXcxw8MntoB!AEu^{2ds@{Rg4E|kTW!xi+O%^c?d+@5W^r~
zTJgBcBe{S7&)@#x>vRVnqC*mQhXckajf$`%0fUILD$X1*#%L_793BrC6Eu-kX+9b-
zj?uBK>f_@9BTLz=%J5{sn4+nyI>gg7Oozo+!*oP^HB2nF8>aCE<<JNnm9Y^zE@LA!
zDPtovjWO{^ooDE9ZfEBgahl1|#>NKEQif(_NyZbG@x*03aYLTCAy3?pCvHfKqXBAd
zGIH{tf8kwBHW!ohy$oC=CXsumJlDGLA;&2$`_e8H8{(efYT@!2vnN3W4sk#P4oL-;
z>7-6^NLRo}Q(A;|2aF!-5$(MJqmTN;A^ibkfCj`Ng8?H$8F9!^z!;`samcPXIK+p^
zp)RqYL)~I@hkC{64)u#eQZy)IDH@Wo6phGON<8R0WR$u*hm3j-IYJ5Xgu;gwm+-_T
zJaI`+T+$Pl^u#57aWI0pH{ti3;4sEjV(Z3&=MuY?A3*sxmgOpOlb8J)OVln0!D4-5
ziN`Q&9HPgqTdG9#xJwu_w{+Zy9v9K$A)<3?cU=9`uMt;$(XH5aME|wpvjcyqp0^l(
zHhtKSy>`T&7h7I>n1pXTDEfMFn>xOJ7~9UZv?7+?sMkMRRQIT3Q9b+u%s0w#|2~N@
zjDkUl^6l6*q$MfY6uFM3J?Ro#DT@|u2+BZyrzPLxLVg#*pi9CWNs`kP;ciNE4@J2b
zW<+8O-c>!IB4wFI-NB>hA=s}^vk=;N6XG@4N^J-p&r4wSQzuUS6CT(vpEuSZ_7!KK
zJ7Yx$crImJ(!r{?3d)a;jJPlpqL&6KCDAcR14!qXfUL^H_#A=sk=+3@;u6M8fQ%UF
zBO-kyL^@c9yi3_C@8E0NJBN|h?7T9n>4tZXShRSDhyw4B<sFX;?=UWjy5@-|;@4mE
z7>z=ONmYpUCOXCwxd`$(B4(T>XpD}~(R6~3`8)MKl6)NA2oxh68*%6Jp*l-Z%9b&!
z%9AvS&k9e0HC0CLkMImdXv&1mY||bT&o$98eg{)U&OA-g9KC~6&(J)!b%KuYNt)$T
zbdpcgG`|bSylaKrX;6O1oeCa<kXz6!#)7brJ9B_gJBy{~p!1v+YT&ta&b3Ye$`7@3
z5)9`;^wC*KmT`QL#JLx4=jkl}0Fy63!-d`9cEKf#nQ*&cG+Yo37eX|E^+V%CoeuaS
zyl@F#xM;j^8R;QMxuyVI)N~^NuUNDM;8nUB#KBc73_LEP;61uR?->o(=n}tA*Z4YJ
z<Qq8E4a-;8LHWLVKX?p$byKq#3&MPLi>}f|q=yd<;IrEhaR<8ZSUv;Kr8_Py1~&tg
z@3T8%(K{h}={8*vpWUVpJb&G#+x#O;ya(y`cKhp|OBgf$x@V-{6Y2Ltq=WS#{BfV|
zH~Hg%rW^kF$fCs`59wjxkB62&JTCmPL`y+Jct}e~2p`jf;Mh`se9}b6_){c=M-WP%
z(5Lh<eMFCN^B$X6($!>=Kf}$wShjM4P?rA2DE9EVo=l&6$std9NU!h{kNc#|1-i`3
zbeW!*xLIl1lH#XLbc~CbD*53VJ)x&mq=&Rj&#=qq^bx<H6)w>Wlm-PZ!!u<o=3G#I
z%$0)2Am%EX#aIv)bE^l4wJMgbLFbwkYv8%G=2|BJ<;U8ZM8jH$KC0435^Gf)O`FnB
z5$jatFEGCbCAHllS91wtCgf^HNlld0LX?2D;V1Yc*K;b#o3u&NV>RBc$>04FRZKj(
z)rN|t3xD#N_N$dP1g!2?n<`@n@b@6~MTZFVX$e?qKu!IUumZ;8B3Mv4vxD9et)%pp
zN~%M@f7OK71;)6QV$_P7L02rgTTQ{YoPuRimdiUqNzW;zwytRPB%HA!!LJQFQHiHf
z(WDQiwp;o>k%-NeISsWa8G_;mMUht9?W^@Bg{ke9-ck;^x7BTIk$b7vtJZs|M=J7O
z6j04ANI#*w4BqoG^o;%J69-V~q<ILt8?ttn0fl=Yb{PyF{hi7P+J_JWB`AsXF^q!o
zjao-ik;?IwsXsL{$s=eWP~J<~hyN~`P6wTay)(t5O|&$iicyi%D2fj>iA3qdFxm)F
z#!XWdk2TRT6f;TGEaSxZJd{oj?;4)S5g$cCW8#RsZH31XT3HWyIVu)73X>hh`3ypE
z43w0z$Na17O$sy9F;q*(Q4JmU!d9#>NfW4rCK~PGG#1W4!%Qv~fCE{f#A;g4l%`zY
zDKm11XF{}rH48bjmYhkU0y)!~GIC}`&TNPr?0AmmP<Tan9!5I>nJ28%&x7(4=R3ip
zZwVP;Ox}B=j-$GWrK8e2TZ74e6<icZSo}LT+zM%>Ir<ah(zT`Id%CdS3hdS$-5N&T
zlFlpquVqWe1VFi+hymqx^o9H*gK}{nlv`A1kV^Ii<>H9E#H6w&!K8aIzrCQG7LQEo
ziFd?P4qWS4t`$&D#HW1m-lQ<`e*)z!eDzyEIass-$_WvGazYp|#t~2sb`L-~2gz}C
z60c1I$~jC#^4>I{oP!)VHVHt-fN~B4l$%sYbl$r>?JzRgq{^}Z<s2lV?4+7!1Ijr_
zMpKjOEE+T9TM0aXa%WI(0F*;%i;wX$DkGqri~*FBF@SP122c)Tz)y1V*ME+#?(m`y
z%i*Q|%{Sk$2IgcLfH_$PU{00+m=loz=41@OoQwgOlQ95uGPVbp`ws!;dZJs-zSmZ+
za|^%~|MJrJ1n$H&0dOZo0Ne>-fICMj4sn_R?i?u^#Ayb&bAX5Rh|>&k=iqJKCr&fK
zorBl&fH=(ncMkB}j5y5zcMc+ESe&*iR?d3BomdgTomdgTomdgTomdgTos0pvlQ95y
zG6vwzavGpJ&uODliHXxRcqhvM-pMk6cd`uNornZ@Cu0EbWDIY283TByMS7J>%l>dX
z-t7Mm!MlI>YsB6@;N5Y=qYd7T`|)=$@NOs44BqWT1iTY2ieCZmb|T*kyfg9*cqj5Z
zjr>1>cO-QJ{jY&{#ybYQ6Yr4m4u41R&Tk*0yDnj)i<373jmafGCY1n6C)B9NI4iLx
zuwPcgekt>UX!W~7ye0tenJovfpX}M^BV?!1piY-DtBSY53_dG73)XBI1!Y7EN)(`e
zGca#~WXKzU^K57`CeWUibILSCZ#mIvI!z=eI&Dt$E}iBDnxzwTk`@3Oo=T_q3;@Hk
zG|%VgET2bx`vVj|fYE}g{yZqZu00n#dUY*qbV0K;CfBfKfKmU@>~eSVYs}?~ka7t+
zFIj*9crIOP0t_#iUj35P=a;akA;a%ZFM5FCMU(>pV0am@8sIx#goef4XuzUtL3gxh
zG%SjS#Sjf(T`?e!PC>x>B7pNEK>U3FT_XiJ?|rnH`pEj5u<M#`08}?DT3&%S=_Y*Y
zrM#Pdzb^sgaS=ba=my;~8a|-+82B3Bp{slsr@CwT>JBL1SGR-5z*j%gEXIO8wgc-P
z-K47k3_m=8&j7&j1L%HW`3yWDI4)feT5M2$|MNgB3P^2_Ub;^=1jM;dAL5OP2>|c-
zAwHKNeQCG9mRvx21}UK>BYjDv1H#)Q9juSxk5A~6CVzaY>4rZZS+w}$F+C3a@!0Z*
z$Av#Wqt61s@G*UcCj4{yG&r_g+k6upW59M?Fou20m0bo(_qklzWpia$Xqlg4JCxTz
z-XkjD&K6A+`cUIDWRt6a8ch^w%G!Bj6uW<}C)0B;A-$j%SXKcZD3hpE1}b=s%XE!O
zCgLhhhf%!RM8~*_sgm2)4sgJATBpZyz;$!LFKC@>RH9WNc{M828qT*tkGM`1fNgdD
zl3e~VqT$C@oPP<*kMqsoF^Ka&(=5h<u!#7|?D8!+=_QuFg3eb~e1qrGE7v+ZC_lbm
zNd&$M(MK=ok;L~)dhEH#2PuC-FZpZC--42@-C@4v62=Tjxn-1Wi4rv7VTaoGUq?ws
zORkdq(+J@|U`1<k@*jVNg3ZDy<0#U4{KBja_vE|alqTW~PAPdSq2mydwl7ZE^lKFh
z!b8^}VS(Bl?hVB$(Pik)qA>|)Y)E)JPPw02;FO-)eQ`=r+wFJ%|0i)u*`SY8;?dvf
zL2$~QNE1#e{!7At>)#8f^g5p1d9L9mX26ld;zQPHfSAW}9=bWE(an@WH^-acN`*$=
z21;Y-==Z7YEM-#Jp|v)Z9jdzH=*=eRD9PucbmwkNG>fKBzMu_2(KmpeHC!ml_Mq!P
z3OrPFQlfSeS9;RQRa2k@6`k^Pg*PcIYNt>rPNPDc1}5m26SI^>jhOXNQ8<?0!NPfH
zn77bOeDUjn+FH+?rc4OTOVXGR(FT@>U^dWI)P<HD0Wlk)1O(F!M=+;Rlb*!a%@IB=
zW_uT#JK*=tjqjQp&hjttG(?5rqW|_s!bSfo5(bPW|L$kVm=QIulD~<{Z${^WpQX+%
z>R<WJE&D(AZTu`%e%*MUR@PS6OO-;sv{t2Jt+rO9`ev<Kd`k5-s%=*5rPU%mE0ijm
zwPNC$_n94gaQEun^Hjobl#8pyYMoXK)y+bM@cZnw+E+pqp6daCTW{Nq{cFzunce&s
DKmk>_

diff --git a/src/wasm/Hacl_Streaming_Blake2b_256.wasm b/src/wasm/Hacl_Streaming_Blake2b_256.wasm
deleted file mode 100644
index 36e0d79281c79379c63478611d2c6c1dd9f0f0f0..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 4495
zcmdT|OII946t154OamE{Xi$v#Dws%A&=3Sg4e1dv<Y81Ws3A^zh6cK)=V4Dz6fiSH
zk9ypBjtdtqT<XGX{Ua`1`2$?Ja;N8bt6x;dIU023IWu(KTet3`zOU|7!}NAtVT>t1
z?e6Zf12>ee(!HVdl%D@5+3Q7PU)xRef;mslkip$H!6GsY+$6sHn4Aw}FY1n7H|%8H
zs3%xaU7^@ZomQ*qxQC^2xnj6Y+nAnW>>o_dOfuykOioX;gluw14n%ou8%~oYWs6uU
zx@~SW%9Kh<HaQq`qT6<}%F?pIh-?s+Q*P<inz6<*vSm@WbRE6Lvhws%dAho3RBPo*
zXMNpp%58JoV7dFx>s7maUT<%fOSWD!CM)I1XQoehT1io^1e2ZCn(i9qddGH6mJb0b
zkWw$VUEOh$J(m1bP$6AV2L^{SpNX;3W7-#KHT%s+BxGhh?@O*@=ykKPAqS4)P!i=v
zvs^Q_%2l$XkY<AwCil0}Y?v-nr}nqVuaFJwbJ;otYj3i_eWtkX7zP{4`};?TB$dL~
zUy0rO{5JhfB)Zw2#>N=qX>C{G8DxsNhiIfJHhqd|o<&w;vy365DK(x+o}&S~m^$Yz
zLRP443)yid8EK3sc?wCSrWqSh(u%4g%X3<a=SP{wtYVfS9WzpY7AR6SxiA|rk&MQ)
zVUdV6#&VR3kdwBSvn}$%bI6gL3+Bl=^1MI+zE}E1`2I_Za5&LT^%U=$rDvYL)Fir(
z(N~s7gDkR?g&)Pw;uI@Vl*FA<5BXbHQ#~!2MxJ)-&POHoI`SoU^*vJVDxvmUl^2j8
z1q!vxsUCS#p~h+$@Z`N32PIiV8^TcJe@OE0Ysvos4B~)~mc@%u_<bnygGlg01l1u4
zs)HmcH4m-(qM;8PL2=kqaazFBwDD<VBIrnUKZdC45BUQg>YCz%;0abS6<foPkT^`S
zhqc^dNE6ao7`8AF;YCtFnPE+__;AceK$#;LoMt&3!2>g_NCpq$2!Dv4BjkNV8;-mS
zN@2tT%>`^BDeyibypQzpPO68sU;p^ww_9lxG`8TOa}-Bo=se=dd*~dKWPuKV2pvf1
z_*z1TgOhLHJ;3kW;>Ynwv<c$ajmG2@e^gWWiEx_}g6sXa`6N!__za!PlO;C8=pgXN
zaFUM+u8rbRjNuqg6!ZL4P^14)`QwCRlrj-{kFkNCl70dSJW(U727eMy($nVSq#CdB
z3F6mNP%s`6?&(<ejGvClDLzT2!q1s|sAvjPfT(CHtmqj`@o9|XG$t{Pr*MV}n!z#t
zES~1i;aNV53I05h;dzPXvm^yHe=ZtEG=ITUaaz!i<}Zd>dOfE9zMrL~a};w<qBrR*
z%vn<HBn1SV69k;=6Ax!`Owju*k%-QgPw{g&%QZ4CQH0Xn6faqzxsc+eFhWU0DD{az
zs+U6ey%xmh3BQ*Kzxfcq1!_sJ;FTEZ&wKJ7=@%tgkp2QL5T<_1yC4zB*Ajm&Vi6a^
z2$%3Ozl=-#Rm}4%RMiy;t5-=1u(})#BdlKYRGb#{!|HWhz&y32H|_zmC33M$v6m&x
zNM~W$!ujYlk`!RJEOvCcPh2cvQDC-&H+;O_#1g+shAZTK<!-!IEYMtt*GlMpMR;H7
z<DFD*5gu>j?HC^Kc=8?|*Cbisv5M6Qk5vf|UrTsg$MvWQt>QX0p?C34RJLeB<(Qn}
zH>e5e;l}<=6RMzscg2ydgh#fD3SXmiP{s|c;Tm*0vqs25uivVCow~_<P3l8Jk}}_$
zrR+C6V{G^hX%m~|-R7pRHEZ0$0<U2KX2>`@Rt)3yn4IDbG8H|kc@G7)(88)HuoV{g
z9$MUii8>l^P(zdIYvUSsVe<}LeiIgdpV;ue#PgdZ1w8LW!-(e}cq&c{`tf`#%<^w~
z(l#yKp_n@oze#6d$C7F%Dd6{xVBk)lc-Y1@!S8LX`Va+e=|gPuTV&s*Al<uZ-nBq;
zA<esCkgf>Q?GuDlz2N6a<#f|zD!;9OK6KHPc0m19Tu@VqZiX&r$>2hkq4VM0NMqMP
zS{Q^E*TlSV6v*Oy6zKOo>{kVeWxkderqaD}fG($lba(tQ;oTkSqRCS{jTF*!4^n^m
z&$m-XX~l1=;->n0vfoWr{pu5{Tw$Y3{UR;iEheH@nTgWgJJdvFZ~T{6nORhu^_FSt
zuGwtBaGa(Cx8pR7HMmVUorY`H4Xo>?-EoY<g8v*lUN_rqdSimS)iPi<VC!4Wj*Gh9
zn#CovGO^fjuNqai>4=eIHri&jJ#o{h(%aPWQ*3{^ikGMVL7M)1*Z(0=dei%tX)a%x
gzcLH6-m;Cl(GYkyI=T(|??cns5|X~*O%TR^0*_)WQUCw|

diff --git a/src/wasm/Hacl_Streaming_Blake2s_128.wasm b/src/wasm/Hacl_Streaming_Blake2s_128.wasm
deleted file mode 100644
index 0fb085660a7f27b6681bffbc32e4e7cc0f688fd7..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 4260
zcmdT{OLG)e6uv$0nNGs+P@)kzmxu<0C;>wfV@!we5)zC+KzvL(lMd;gp6;Qi$0{hv
zpq5(hbYZ!4sSBxc<r;s33s?RCSFYS?mEY}GZc|ew(G^wc>3hyS_k8E}_w~(brnj34
zV@&z=-o1Nl$BMG4Y_2F>r5pZayWM0QdN(uOXfCh|2-timCR&8RC{5=3PmsK?ctQ8{
zreSBBMl-{z>NQ|r?zCF2=kJlmwN=A+ZDafdWB(Fze2gjo5^`*uW#o|EG7##xV|Xsh
z$`Pqjblbe?)KE%J4%wATqT9AxXL&hbNDhd%Q)}t<hOx#9a%5GG^gX@Bit_Y6dAhz~
z)El+c&icCH)!ODAgO#2*r`PS;IlaA6o3r(XG1jgf8=E*7cqK<|HJa?S)^y*fH9NL%
zvT{sF1x~%z_I1zCc3Ji_A%%QJ?du;Xd@jaHmuX+-)#A4w!zi2#vS0Q+LvNbSO&K`B
zK@l~_tu>6>wL0PeX*#Sjw!M|6WBN=zvAsp%4q4wem#qV=y}|moS>n297;K;%K0h#$
zRVqJzEuP*NAK^EX*(`Q7Ho_RsYY!MNP@!6S8Y4}y@mJ0BA{8|@#TXSdrNIl?Sq#{P
z+-$Ijio$JMR6NS0AdT@X&rz0g<Batwc|}#J$V*y|mxr0gtZI=_J{6??CRju@rMMaj
ziqV)hC<?JgSP7*7TFcs6$+nP%m#7506wM<!m3ajKKPbHre*Ovwdor83t`b~D+{?Id
zNNjRkvb`^=qKGO&l(33JtO}IO{kblc?=I)MS~gE*Jgxg5&#@&c&#~oOa6VwM_dAtW
zC<6yo8Vk8DvZ<t|_RtsDTYK!6Y=JgF14;e?DSzli{vFg$J3_WBUL}R^q$=M<8NM4-
z?Utz81xslJZ0$^jA#I>yPvGKM!1GW+o(c(da6Lf-*!73}$$)iD2}uYHtC~x#;d@~Y
z0((#^osVS#pGAWf^(B14iX=0rDHb10<%lG+m-@$9iT2WyCs`E%PtjifH10#lKBNsM
z*@dKN$Rf-|YJnAH9}?MzdSr*|8SRh1fBN%oo+=tU6R@+7_NCZ)Hn6wY*)Q3G9U@BD
zA&H&P3wAhh<POsl@%wlA0eUuh2;#LHPT4vBoTl)DLNUYi937zjbg)|H&qtMpH<iBt
zHWFaSaZ7iRAJ8L|p^*ln9Daxn;coN8a2;;&7on0Pq|o75Hb+x6Fg}{HbNnTQiaamV
zVH%~Eu<IlAGRite`}r6h<;Q7^pP(1{D<I|-NpdG(MUp$73=_$X2QH2Uy(D+?5u`SO
zrIWy%l%xiqMU$4a16Yw5CWRO#d*IOo?H5v;z}BGiP;OH+!Cyu2G!UjAmfN&Nn2Y5$
z9V1K&gy|j#aJ?3j7#s(D1}wY|7G`1=W^p`dq@@Hn7uZ_@JSEvefb%q;=wM!wLFk1F
zPSYtm9V48f*ZEmG!_UzSKaX9Vm$*6yE8^;GGEBI7BXDsn=*86qnx`2Y4;LSSvjwDB
z1oooD8GIHkS~Qou8L%SG7R3`??14)QbV_ixKo>*4F3|#i6M;*}zVt9(OBP`+=4&a=
zz9h0Q^~es_Tj1j|T~6_FC9t>nxGLF#k7Zg;_*j<s2)*Fr8eK~cgk`#h1K~PdNwzHx
zgtt?6j^Dt6@D7sF+jN7j(^Yy0Z{EAHmVy&Z<?rFmo@q#Dg0Lj~ty$FYeh^IWhlfLr
zYRGQ$mC(D=;5yCnRhp%hSU2@lNsO<h>>M``D$a*>TA?*EXqi@N9aY|>t9*m%+@uX|
zk<J@nrXgw0f)#1bOooZ(?7+pbpqJ*FkDxUNOI={PlGfm}$hD*$z>2iy3N^Sr@W`R7
zLTe7T7Cg%6h-i_+Zz0|Ti1)DMJc}?FOU{cyJOSeM0D-F=9q=mmHKy_obwu|iZD0LN
zbZxoJW&s^bHtJFe5Ngn-rFtVQjIc%TR2GRMTL`s+e|mA>QjvrbdLa@O%~T&cw*FzJ
z{gMe9Ds*2w$Mck<JemXb_y66pF_e{V;Y9oPSGHICrhfA&cCN5trhb_h4fkl$-;YY)
z5u@>UM6dpSit28&W!k!Lx(*qh=X&IKJjYlg-zBf(_-50fb=|Z(o>4gy-bW5J&2}4~
z5#--)8Du(S>$lyGPffiwMHkK0(esYKY}9?%6C=-b+Gf2y`hihL2?q|b?bRy2C?10{
t!;g{2p-_DO{Kqn{T$;Z$MP{>Q8%@Izd^;W8CVY~+-fdy&H-iaq{15${5SIV|

diff --git a/src/wasm/Hacl_Streaming_Poly1305_32.wasm b/src/wasm/Hacl_Streaming_Poly1305_32.wasm
deleted file mode 100644
index 0978260d791e79a69b6ccce923cd1f12aab68e84..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 5365
zcmb7INmpFg5x%b{UeAxYn`WqD5FiN<AZSUJY(4A<8(Bh%WMs!fs|Afr(+p`f3;V>h
z6nt#0vd$)(Y_ejTlcjz`R#{||UyxOH$w|JtZ|DcYlEVRR)qQoV?yawe`<^O4+cA_<
z#^0Ymf3B>@#-VZe*k~F}=U=$l^l4}IFw}IPY2FnG%0U3j-5G}w(a>LyU?JUhyIe2t
zRBGX!%1%gG$w%10gM<D3y?Uc7K$o6W8hf?M;$@}YA~HXxjJJr)E~-#aq%&v;`}np}
z-&5hB#EGqxYg<ouOV~;zDAI94Nx4?rTUXJbKp`mL9jCNkUf-^4s8~?a3Q9KW<$cu_
zd^`|*y#94%eY^DJU~{ulFFo7(wxUey!}5BqbZ@Wr&FuW8h0^>Sot`M|dk+uxH_DBQ
zivK@rHn(=So_(zn9&`z)D3_i!%JoLLslq=<<V2IHbS4}7QD~#7>=#C|?bQn~u~EI5
z2aS5AytB3YG`P>wIwl*H($=m@4!uDP_8Th2Z;)`0m41_qdyLGREN#{+6_t%U2ZVFO
zM&i{U<#@k;DgQ%<`KEaoX|^BQYC<WF+7`zsW|@7qWg9j8T2XGJHd|d&iek30&9U%J
z5bAd1rd~pAGF_`u+ms3j*^0v)p)f@jl}Z^=BblT&HtjseDK6^sqTUqs2}&%fBGku4
zeH`lJ0rd%RCe$Y;RY2%0^$AW=l2bNun$lu|{1ap-<D4KZCekpG4w%S*lM`gz6F5R=
znaFUK6kC+#c9`jao{oT?c5ueFR%Q95t5cpZgkSecmOHg0ZNm<WM>JElA}3_Ii@GQU
zS7I<1RfM{1bzD>?*v>^|Q5Sc6>*3KJtnJ0-dhM={JeV+7rQRxa+se)fu)?{U>$Qz4
z_nt85Le|4MN^zc2c2t}RAh-`6C{W&kOb@2RS<aAi2BHEG<9_Pr0V>$0EQe8<8>B%V
zTKX@3Ug0mxwDfPEW*Q(m;$a#hKH?GT<5B8s86N|a@OE+zOx9z@X_zNygePf2R1Y2T
zd77~Mj$gXm1#p*-U(zIab!4ZGfB2p+(xjd7C8oel9KYmgI?pq7fiKZTo~0?CqiLR}
zQJRq$9i>a+_EDNG!tJ9pS5!F+^F>88Du=r)gUJgrSiC4hj*Bu7U!gI&yc9hYMPsy}
zDHGJ9rYulJP31sc(G<}+S*jynQ>IwglqDO`R8IDwDUxlB)0n$LQ+kD_^a@Ss6`Im3
zauhuQe)10}fhra{Ot{6Nt?8797#U7Aa6@PUM>M56_<RKU9psgyHxe_^!Z3vZN>kDa
zAU&84XE{U88Hm#2PoyrAE`1_^EV8&g5I`M)0P3VHbsQmI+Ntx1yAi!T!RYM;;|CC8
zyA?orBvBuABewh8km*N!wE}1WoCHuI5I}>7>{b8`fs+6t#602{K|F{E9zp~Yk5Lb?
z;NC;S0$~r02!1^@D&Y0dm>}092R<i*$>TCu2x$yCL^TFNNcYk?9T*DKxTZ`{6PmI>
zO=>C!>b$0ipxq4zP3dsZlnw_?>2T1L4u?GTx+^rLS7=JF(3D=GDZN7R1v2@fO+1CN
zGZm<b7s0tT@q$mfWk;ScggL@dicqxGDNLUJshZf5MUu`?LTcg+#ib_BxHWNxrlls%
z@FnP;#fD}Bb<mJHI9nw=^XlNNR|jXM4$hvjAzON1aG%M){0<pnh0r<;{byp7zkYWV
zI_G9h8v@=W(>Eya#wm2HBREU(@75wsI~+x$vyOker0!FENv-`FbCx^zrzEl{WLuQj
zz7uIe8ycOjP1<(q1Rs&~@}G;fOn{f1&7vQ=wlXbSdJeWMN|W3DtK5#1@1S<>q!4$(
zmuNhG>+b~Tg4^Mf4!H1Ex1M4w;3&r^N--Z`O!ZI(F8GbK9h|^xUYc>5Uuz9|5HeMO
z&QV%`MtDmO<tPiBp({^0?!#EX{t5xCm0(?{B5{4J3m(=5!Mbqv-umr-{^!sC`Ywv9
zrEY15572<GLmWPMf{*Y}fER~TfER~j!gDya9Iof!@L?K;A;;DrU>>sKJmT)b8ImX>
zp<_=4LdOpfXBJAvC`4o1wzZ22a?ZADs30L4-$n&7P(BPAcRMhO$RK*eBy3zjak_|Z
zH-%0&O%pr={~?Kde5b*=_)htxkMAWt#aM6_zDT3D;yOojG$^>v(U9OeM<g*J*v=6<
z*v``&UxtYV>}}yRwhL9lGcS)9JZu*P+l90Dwdna(vWbgu&J{SP=sD*qvg;aM^OXwN
z@0{Qxd_BMm*f#>afc-Ar@Q}LE0!q&Tr}yYOz2}*`NmtmWo4iCtejg`%Kj8f(aIW`l
zpY*-|fu3S4ILrILpc_;~b|I+Vio!B2(>sE~GF=xGmg$xtuuShd2z*G({1MEoV1Fy8
z5m>1bo_PqYcnGWr0xM_l?KWKeF@5Z>5*Ob&!AJORfEO3926%DtJzDi#yxMZHo`Z|;
z(|tdm?jWD;BG!ND;jU9V$q$en#qCx~3DeT#KZCZ1T9O{>-1>w*k=$By=GM0P5nbg^
z=_;*xkbXvYXpKIF$zRc3{+vGJFX(grk{<C_aNt(~fWHLi0{n$f`T&>o6l1|z06+d2
zz?P{@cLdln-4$TVwCWsMXIPo;Iru)IGOxqR2DZ0x8sCj7;hBf;hKKKl;Jb15)+%nv
zO7f<ylKhmO%8f8^h~z)MNArw?4r6GNo&MEQow{18H(o6zDiSBInwGw1x@`@wlwJ!>
zNTdMA6va7h=jBEzZ-+WD+_zB8IBh3zzsOS7xuj+Q6KCKayhZxOQF^Gnm}cF(k|TtJ
zwlugeQCW0UTv*9u-AJ*8pJ=3_&TXV1UA@v`brXF}Z_2jz5$CwXs@v{$L8g-EylF~N
zx4eqtw~Ahy=3Z*IacQ*TeYhE?s0Rc#g3kK$e3AFw_;RwH9DI=r_yP@B`y%h|*%3O+
z7kS*V+vSeU{V+cOJ!q`HPy4|kBg78{pLBJ~6Nd2X{xv8`eOBW1742{G=_V99^j=<~
zs5|(!;YYL75&3xGMfZe?AmW0&;`oT%A*?oa`VA@hQoaQlYD6Vp81fxw+W#Ol9r$oH
zE#F_R(fZ!b{#LEr*xK8rO1-{Ur^Z2jx3WQvJ*pq<Hnw&uv{~M&9n>p{Th2H!{BZSy
k)oZl1vtO(1RCXJ*Q{Fu&*9f2Y_UhjVSAMD=3{R^60k_g4HUIzs

diff --git a/src/wasm/INFO.txt b/src/wasm/INFO.txt
index 60cb7b00..efb7071d 100644
--- a/src/wasm/INFO.txt
+++ b/src/wasm/INFO.txt
@@ -1,4 +1,4 @@
 This code was generated with the following toolchain.
-F* version: bc622701c668f6b4092760879372968265d4a4e1
-Karamel version: 7cffd27cfefbd220e986e561e8d350f043609f76
+F* version: 61ccb460777edf004920e775a2e2d5825726354a
+Karamel version: 7460546b9dd1a61b882f97de52c8fcc60cb09bbd
 Vale version: 0.3.19
diff --git a/src/wasm/layouts.json b/src/wasm/layouts.json
index d9d2a29a..81273a66 100644
--- a/src/wasm/layouts.json
+++ b/src/wasm/layouts.json
@@ -1 +1 @@
-{"Spec_Hash_Definitions_hash_alg":["LEnum"],"Prims_string":["LBuiltin",["I32"],["A32"]],"Prims_int":["LBuiltin",["I32"],["A32"]],"K___uint32_t_uint32_t":["LFlat",{"size":8,"fields":[["fst",[0,["Int",["A32"]]]],["snd",[4,["Int",["A32"]]]]]}],"__bool_bool_bool_bool":["LFlat",{"size":4,"fields":[["fst",[0,["Int",["A8"]]]],["snd",[1,["Int",["A8"]]]],["thd",[2,["Int",["A8"]]]],["f3",[3,["Int",["A8"]]]]]}],"__bool_bool":["LFlat",{"size":2,"fields":[["fst",[0,["Int",["A8"]]]],["snd",[1,["Int",["A8"]]]]]}],"Hacl_Streaming_Types_error_code":["LEnum"],"Hacl_Streaming_Poly1305_32_poly1305_32_state":["LFlat",{"size":20,"fields":[["block_state",[0,["Pointer",["Int",["A64"]]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]],["p_key",[16,["Pointer",["Int",["A8"]]]]]]}],"Hacl_Streaming_MD_state_64":["LFlat",{"size":16,"fields":[["block_state",[0,["Pointer",["Int",["A64"]]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]]]}],"Hacl_Streaming_MD_state_32":["LFlat",{"size":16,"fields":[["block_state",[0,["Pointer",["Int",["A32"]]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]]]}],"Hacl_Streaming_Keccak_state":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Streaming_Keccak_hash_buf"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"hash_buf2":["LFlat",{"size":16,"fields":[["fst",[0,["Layout","Hacl_Streaming_Keccak_hash_buf"]]],["snd",[8,["Layout","Hacl_Streaming_Keccak_hash_buf"]]]]}],"Hacl_Streaming_Keccak_hash_buf":["LFlat",{"size":8,"fields":[["fst",[0,["Int",["A32"]]]],["snd",[4,["Pointer",["Int",["A64"]]]]]]}],"Hacl_Streaming_Blake2s_128_blake2s_128_state":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Streaming_Blake2s_128_blake2s_128_block_state"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Streaming_Blake2s_128_blake2s_128_block_state":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Unknown"]]]],["snd",[4,["Pointer",["Unknown"]]]]]}],"Hacl_Streaming_Blake2b_256_blake2b_256_state":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Streaming_Blake2b_256_blake2b_256_block_state"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Streaming_Blake2b_256_blake2b_256_block_state":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Unknown"]]]],["snd",[4,["Pointer",["Unknown"]]]]]}],"Hacl_Streaming_Blake2_blake2s_32_state":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Streaming_Blake2_blake2s_32_block_state"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Streaming_Blake2_blake2s_32_block_state":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Int",["A32"]]]]],["snd",[4,["Pointer",["Int",["A32"]]]]]]}],"Hacl_Streaming_Blake2_blake2b_32_state":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Streaming_Blake2_blake2b_32_block_state"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Streaming_Blake2_blake2b_32_block_state":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Int",["A64"]]]]],["snd",[4,["Pointer",["Int",["A64"]]]]]]}],"Hacl_Impl_SHA2_Types_uint8_8p":["LFlat",{"size":56,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Impl_SHA2_Types_uint8_7p"]]]]}],"Hacl_Impl_SHA2_Types_uint8_7p":["LFlat",{"size":48,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Impl_SHA2_Types_uint8_6p"]]]]}],"Hacl_Impl_SHA2_Types_uint8_
6p":["LFlat",{"size":40,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Impl_SHA2_Types_uint8_5p"]]]]}],"Hacl_Impl_SHA2_Types_uint8_5p":["LFlat",{"size":32,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Impl_SHA2_Types_uint8_4p"]]]]}],"Hacl_Impl_SHA2_Types_uint8_4p":["LFlat",{"size":24,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Impl_SHA2_Types_uint8_3p"]]]]}],"Hacl_Impl_SHA2_Types_uint8_3p":["LFlat",{"size":16,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Impl_SHA2_Types_uint8_2p"]]]]}],"Hacl_Impl_SHA2_Types_uint8_2x8p":["LFlat",{"size":112,"fields":[["fst",[0,["Layout","Hacl_Impl_SHA2_Types_uint8_8p"]]],["snd",[56,["Layout","Hacl_Impl_SHA2_Types_uint8_8p"]]]]}],"Hacl_Impl_SHA2_Types_uint8_2x4p":["LFlat",{"size":48,"fields":[["fst",[0,["Layout","Hacl_Impl_SHA2_Types_uint8_4p"]]],["snd",[24,["Layout","Hacl_Impl_SHA2_Types_uint8_4p"]]]]}],"Hacl_Impl_SHA2_Types_uint8_2p":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[4,["Pointer",["Int",["A8"]]]]]]}],"Hacl_Impl_HPKE_context_s":["LFlat",{"size":16,"fields":[["ctx_key",[0,["Pointer",["Int",["A8"]]]]],["ctx_nonce",[4,["Pointer",["Int",["A8"]]]]],["ctx_seq",[8,["Pointer",["Int",["A64"]]]]],["ctx_exporter",[12,["Pointer",["Int",["A8"]]]]]]}],"Hacl_HMAC_DRBG_state":["LFlat",{"size":12,"fields":[["k",[0,["Pointer",["Int",["A8"]]]]],["v",[4,["Pointer",["Int",["A8"]]]]],["reseed_counter",[8,["Pointer",["Int",["A32"]]]]]]}],"Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64":["LFlat",{"size":20,"fields":[["len",[0,["Int",["A32"]]]],["n",[4,["Pointer",["Int",["A64"]]]]],["mu",[8,["Int",["A64"]]]],["r2",[16,["Pointer",["Int",["A64"]]]]]]}],"Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32":["LFlat",{"size":16,"fields":[["len",[0,["Int",["A32"]]]],["n",[4,["Pointer",["Int",["A32"]]]]],["mu",[8,["Int",["A32"]]]],["r2",[12,["Pointer",["Int",["A32"]]]]]]}],"FStar_UInt128_uint128":["LFlat",{"size":16,"fields":[["low",[0,["Int",["A64"]]]],["high",[8,["Int",["A64"]]]]]}],"EverCrypt_Hash_Incremental_hash_state":["LFlat",{"size":16,"fields":[["block_state",[0,["Pointer",["Layout","EverCrypt_Hash_state_s"]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]]]}],"state_s_tags":["LEnum"],"EverCrypt_Hash_state_s":["LFlat",{"size":12,"fields":[["tag",[0,["Int",["A32"]]]],["val",[8,["Union",[["Pointer",["Int",["A32"]]],["Pointer",["Int",["A32"]]],["Pointer",["Int",["A32"]]],["Pointer",["Int",["A32"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A32"]]],["Pointer",["Unknown"]],["Pointer",["Int",["A64"]]],["Pointer",["Unknown"]]]]]]]}],"EverCrypt_Error_error_code":["LEnum"],"C_String_t_":["LBuiltin",["I32"],["A32"]],"C_String_t":["LBuiltin",["I32"],["A32"]],"C_Compat_String_t_":["LBuiltin",["I32"],["A32"]],"C_Compat_String_t":["LBuiltin",["I32"],["A32"]],"exit_code":["LBuiltin",["I32"],["A32"]],"clock_t":["LBuiltin",["I32"],["A32"]]}
\ No newline at end of file
+{"Spec_Hash_Definitions_hash_alg":["LEnum"],"Prims_string":["LBuiltin",["I32"],["A32"]],"Prims_int":["LBuiltin",["I32"],["A32"]],"K___uint32_t_uint32_t":["LFlat",{"size":8,"fields":[["fst",[0,["Int",["A32"]]]],["snd",[4,["Int",["A32"]]]]]}],"__bool_bool_bool_bool":["LFlat",{"size":4,"fields":[["fst",[0,["Int",["A8"]]]],["snd",[1,["Int",["A8"]]]],["thd",[2,["Int",["A8"]]]],["f3",[3,["Int",["A8"]]]]]}],"__bool_bool":["LFlat",{"size":2,"fields":[["fst",[0,["Int",["A8"]]]],["snd",[1,["Int",["A8"]]]]]}],"Hacl_Streaming_Types_error_code":["LEnum"],"Hacl_MAC_Poly1305_state_t":["LFlat",{"size":20,"fields":[["block_state",[0,["Pointer",["Int",["A64"]]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]],["p_key",[16,["Pointer",["Int",["A8"]]]]]]}],"Hacl_Streaming_MD_state_64":["LFlat",{"size":16,"fields":[["block_state",[0,["Pointer",["Int",["A64"]]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]]]}],"Hacl_Streaming_MD_state_32":["LFlat",{"size":16,"fields":[["block_state",[0,["Pointer",["Int",["A32"]]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]]]}],"Hacl_Hash_SHA3_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_SHA3_hash_buf"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"hash_buf2":["LFlat",{"size":16,"fields":[["fst",[0,["Layout","Hacl_Hash_SHA3_hash_buf"]]],["snd",[8,["Layout","Hacl_Hash_SHA3_hash_buf"]]]]}],"Hacl_Hash_SHA3_hash_buf":["LFlat",{"size":8,"fields":[["fst",[0,["Int",["A32"]]]],["snd",[4,["Pointer",["Int",["A64"]]]]]]}],"Hacl_Hash_Blake2s_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_Blake2s_block_state_t"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Hash_Blake2s_block_state_t":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Int",["A32"]]]]],["snd",[4,["Pointer",["Int",["A32"]]]]]]}],"Hacl_Hash_Blake2s_Simd128_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_Blake2s_Simd128_block_state_t"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Hash_Blake2s_Simd128_block_state_t":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Unknown"]]]],["snd",[4,["Pointer",["Unknown"]]]]]}],"Hacl_Hash_Blake2b_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_Blake2b_block_state_t"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Hash_Blake2b_block_state_t":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Int",["A64"]]]]],["snd",[4,["Pointer",["Int",["A64"]]]]]]}],"Hacl_Hash_Blake2b_Simd256_state_t":["LFlat",{"size":24,"fields":[["block_state",[0,["Layout","Hacl_Hash_Blake2b_Simd256_block_state_t"]]],["buf",[8,["Pointer",["Int",["A8"]]]]],["total_len",[16,["Int",["A64"]]]]]}],"Hacl_Hash_Blake2b_Simd256_block_state_t":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Unknown"]]]],["snd",[4,["Pointer",["Unknown"]]]]]}],"Hacl_Hash_SHA2_uint8_8p":["LFlat",{"size":56,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_7p"]]]]}],"Hacl_Hash_SHA2_uint8_7p":["LFlat",{"size":48,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_6p"]]]]}],"Hacl_Hash_SHA2_uint8_6p":["LFlat",{"size":40,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_5p"]]]]}],"Hacl_Hash_SHA2_uint8_5p":["LFlat",{"size":32,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd"
,[8,["Layout","Hacl_Hash_SHA2_uint8_4p"]]]]}],"Hacl_Hash_SHA2_uint8_4p":["LFlat",{"size":24,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_3p"]]]]}],"Hacl_Hash_SHA2_uint8_3p":["LFlat",{"size":16,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[8,["Layout","Hacl_Hash_SHA2_uint8_2p"]]]]}],"Hacl_Hash_SHA2_uint8_2x8p":["LFlat",{"size":112,"fields":[["fst",[0,["Layout","Hacl_Hash_SHA2_uint8_8p"]]],["snd",[56,["Layout","Hacl_Hash_SHA2_uint8_8p"]]]]}],"Hacl_Hash_SHA2_uint8_2x4p":["LFlat",{"size":48,"fields":[["fst",[0,["Layout","Hacl_Hash_SHA2_uint8_4p"]]],["snd",[24,["Layout","Hacl_Hash_SHA2_uint8_4p"]]]]}],"Hacl_Hash_SHA2_uint8_2p":["LFlat",{"size":8,"fields":[["fst",[0,["Pointer",["Int",["A8"]]]]],["snd",[4,["Pointer",["Int",["A8"]]]]]]}],"Hacl_Impl_HPKE_context_s":["LFlat",{"size":16,"fields":[["ctx_key",[0,["Pointer",["Int",["A8"]]]]],["ctx_nonce",[4,["Pointer",["Int",["A8"]]]]],["ctx_seq",[8,["Pointer",["Int",["A64"]]]]],["ctx_exporter",[12,["Pointer",["Int",["A8"]]]]]]}],"Hacl_HMAC_DRBG_state":["LFlat",{"size":12,"fields":[["k",[0,["Pointer",["Int",["A8"]]]]],["v",[4,["Pointer",["Int",["A8"]]]]],["reseed_counter",[8,["Pointer",["Int",["A32"]]]]]]}],"Hacl_Bignum_MontArithmetic_bn_mont_ctx_u64":["LFlat",{"size":20,"fields":[["len",[0,["Int",["A32"]]]],["n",[4,["Pointer",["Int",["A64"]]]]],["mu",[8,["Int",["A64"]]]],["r2",[16,["Pointer",["Int",["A64"]]]]]]}],"Hacl_Bignum_MontArithmetic_bn_mont_ctx_u32":["LFlat",{"size":16,"fields":[["len",[0,["Int",["A32"]]]],["n",[4,["Pointer",["Int",["A32"]]]]],["mu",[8,["Int",["A32"]]]],["r2",[12,["Pointer",["Int",["A32"]]]]]]}],"FStar_UInt128_uint128":["LFlat",{"size":16,"fields":[["low",[0,["Int",["A64"]]]],["high",[8,["Int",["A64"]]]]]}],"EverCrypt_Hash_Incremental_state_t":["LFlat",{"size":16,"fields":[["block_state",[0,["Pointer",["Layout","EverCrypt_Hash_state_s"]]]],["buf",[4,["Pointer",["Int",["A8"]]]]],["total_len",[8,["Int",["A64"]]]]]}],"state_s_tags":["LEnum"],"EverCrypt_Hash_state_s":["LFlat",{"size":12,"fields":[["tag",[0,["Int",["A32"]]]],["val",[8,["Union",[["Pointer",["Int",["A32"]]],["Pointer",["Int",["A32"]]],["Pointer",["Int",["A32"]]],["Pointer",["Int",["A32"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A64"]]],["Pointer",["Int",["A32"]]],["Pointer",["Unknown"]],["Pointer",["Int",["A64"]]],["Pointer",["Unknown"]]]]]]]}],"EverCrypt_Error_error_code":["LEnum"],"C_String_t_":["LBuiltin",["I32"],["A32"]],"C_String_t":["LBuiltin",["I32"],["A32"]],"C_Compat_String_t_":["LBuiltin",["I32"],["A32"]],"C_Compat_String_t":["LBuiltin",["I32"],["A32"]],"exit_code":["LBuiltin",["I32"],["A32"]],"clock_t":["LBuiltin",["I32"],["A32"]]}
\ No newline at end of file
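
Each entry in layouts.json records the size and field offsets of a struct as laid out for the 32-bit wasm target; the hunk above mostly renames entries (for example Hacl_Streaming_Poly1305_32_poly1305_32_state becomes Hacl_MAC_Poly1305_state_t) while leaving the recorded sizes and offsets unchanged. Purely as an illustration, and assuming Node.js, the layouts.json path shown, and a memory/ptr pair obtained from the surrounding loader code (none of which this patch defines), the renamed Poly1305 state entry could be decoded as in the sketch below; pointers are 32 bits on this target, which is why the pointer fields sit at 4-byte offsets.

// Sketch only: read the fields of a Hacl_MAC_Poly1305_state_t living in wasm
// memory, driven by the offsets recorded in layouts.json. `memory` is assumed
// to be the WebAssembly.Memory shared with the HACL* modules and `ptr` a
// pointer to a state value; both come from elsewhere in the loader.
const layouts = require("./layouts.json");

function readPoly1305State(memory, ptr) {
  const view = new DataView(memory.buffer);
  // fields is [["block_state",[0,...]],["buf",[4,...]],["total_len",[8,...]],["p_key",[16,...]]]
  const offsets = Object.fromEntries(
    layouts["Hacl_MAC_Poly1305_state_t"][1].fields.map(([name, [ofs]]) => [name, ofs])
  );
  return {
    block_state: view.getUint32(ptr + offsets.block_state, true), // 32-bit wasm pointer
    buf:         view.getUint32(ptr + offsets.buf, true),         // 32-bit wasm pointer
    total_len:   view.getBigUint64(ptr + offsets.total_len, true),// uint64 at offset 8
    p_key:       view.getUint32(ptr + offsets.p_key, true),       // 32-bit wasm pointer
  };
}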
diff --git a/src/wasm/main.html b/src/wasm/main.html
index 76be617a..a4605811 100644
--- a/src/wasm/main.html
+++ b/src/wasm/main.html
@@ -8,7 +8,7 @@
     <script type="application/javascript" src="./test.js"></script>
 
     <script type="application/javascript">
-      var my_modules = ["WasmSupport", "FStar", "LowStar_Endianness", "Hacl_Impl_Blake2_Constants", "Hacl_Lib", "Hacl_Hash_Blake2", "Hacl_Hash_Blake2b_256", "Hacl_Hash_Blake2s_128", "Hacl_Hash_SHA3", "Hacl_Hash_Base", "Hacl_Hash_MD5", "Hacl_Hash_SHA1", "Hacl_Hash_SHA2", "EverCrypt_TargetConfig", "EverCrypt", "Vale", "EverCrypt_Hash", "Hacl_Chacha20", "Hacl_Salsa20", "Hacl_IntTypes_Intrinsics", "Hacl_Bignum_Base", "Hacl_Bignum", "Hacl_Bignum25519_51", "Hacl_Curve25519_51", "Hacl_Ed25519_PrecompTable", "Hacl_Ed25519", "Hacl_Poly1305_32", "Hacl_NaCl", "Hacl_P256_PrecompTable", "Hacl_P256", "Hacl_Bignum_K256", "Hacl_K256_PrecompTable", "Hacl_K256_ECDSA", "Hacl_HMAC", "Hacl_HKDF", "Hacl_Chacha20Poly1305_32", "Hacl_HPKE_Curve51_CP32_SHA256", "Hacl_HPKE_Curve51_CP32_SHA512", "Hacl_Streaming_Blake2b_256", "Hacl_Streaming_Blake2s_128", "Hacl_GenericField32", "Hacl_SHA2_Vec256", "Hacl_EC_K256", "Hacl_Bignum4096", "Hacl_Chacha20_Vec32", "Hacl_Bignum4096_32", "Hacl_HMAC_Blake2s_128", "Hacl_HKDF_Blake2s_128", "Hacl_GenericField64", "Hacl_Bignum32", "Hacl_Bignum256_32", "Hacl_SHA2_Vec128", "Hacl_Streaming_Poly1305_32", "Hacl_HMAC_DRBG", "Hacl_Streaming_Blake2", "Hacl_Bignum64", "Hacl_HMAC_Blake2b_256", "Hacl_HKDF_Blake2b_256", "Hacl_EC_Ed25519", "Hacl_Bignum256"];
+      var my_modules = ["WasmSupport", "FStar", "LowStar_Endianness", "Hacl_Impl_Blake2_Constants", "Hacl_Lib", "Hacl_Hash_Blake2b", "Hacl_Hash_Blake2s", "Hacl_Hash_Blake2b_Simd256", "Hacl_Hash_Blake2s_Simd128", "Hacl_Hash_Base", "Hacl_Hash_SHA1", "Hacl_Hash_SHA2", "Hacl_HMAC", "Hacl_HMAC_Blake2s_128", "Hacl_HMAC_Blake2b_256", "Hacl_Hash_SHA3", "Hacl_Hash_MD5", "EverCrypt_TargetConfig", "EverCrypt", "Vale", "EverCrypt_Hash", "Hacl_Chacha20", "Hacl_Chacha20_Vec128_Hacl_Chacha20_Vec256", "Hacl_Salsa20", "Hacl_IntTypes_Intrinsics", "Hacl_Bignum_Base", "Hacl_Bignum", "Hacl_Bignum25519_51", "Hacl_Curve25519_51", "Hacl_MAC_Poly1305", "Hacl_AEAD_Chacha20Poly1305", "Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305", "Hacl_AEAD_Chacha20Poly1305_Simd128", "Hacl_AEAD_Chacha20Poly1305_Simd256", "Hacl_Ed25519_PrecompTable", "Hacl_Ed25519", "Hacl_NaCl", "Hacl_P256_PrecompTable", "Hacl_P256", "Hacl_Bignum_K256", "Hacl_K256_PrecompTable", "Hacl_K256_ECDSA", "Hacl_HKDF", "Hacl_HPKE_Curve51_CP32_SHA256", "Hacl_HPKE_Curve51_CP32_SHA512", "Hacl_GenericField32", "Hacl_SHA2_Vec256", "Hacl_EC_K256", "Hacl_Bignum4096", "Hacl_Chacha20_Vec32", "Hacl_Bignum4096_32", "Hacl_HKDF_Blake2s_128", "Hacl_GenericField64", "Hacl_Bignum32", "Hacl_Bignum256_32", "Hacl_SHA2_Vec128", "Hacl_HMAC_DRBG", "Hacl_Bignum64", "Hacl_HKDF_Blake2b_256", "Hacl_EC_Ed25519", "Hacl_Bignum256"];
     </script>
     <script type="application/javascript" src="browser.js"></script>
     <script type="application/javascript" src="loader.js"></script>
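
The same my_modules list is duplicated verbatim in src/wasm/shell.js (next hunk), so the two copies have to be updated in lockstep, as this patch does. Purely as a hypothetical maintenance aid, not part of the patch, a few lines of Node.js can confirm the two lists still agree; the paths are assumed relative to the repository root and the comparison is order-sensitive, which is the stricter check.

// Hypothetical sync check, not part of this patch: make sure main.html and
// shell.js declare the same my_modules list.
const fs = require("fs");

function moduleList(path) {
  const src = fs.readFileSync(path, "utf8");
  const match = src.match(/var my_modules = (\[[^\]]*\]);/);
  if (!match) throw new Error("my_modules not found in " + path);
  return JSON.parse(match[1]); // the list literal is valid JSON
}

const fromHtml = moduleList("src/wasm/main.html");
const fromJs = moduleList("src/wasm/shell.js");
const same =
  fromHtml.length === fromJs.length &&
  fromHtml.every((name, i) => name === fromJs[i]);
console.log(same ? "module lists match" : "module lists differ");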
diff --git a/src/wasm/shell.js b/src/wasm/shell.js
index 2b0217e7..cc877fd1 100644
--- a/src/wasm/shell.js
+++ b/src/wasm/shell.js
@@ -1,7 +1,7 @@
 
 // To be loaded by main.js
 var my_js_files = ["./test.js"];
-var my_modules = ["WasmSupport", "FStar", "LowStar_Endianness", "Hacl_Impl_Blake2_Constants", "Hacl_Lib", "Hacl_Hash_Blake2", "Hacl_Hash_Blake2b_256", "Hacl_Hash_Blake2s_128", "Hacl_Hash_SHA3", "Hacl_Hash_Base", "Hacl_Hash_MD5", "Hacl_Hash_SHA1", "Hacl_Hash_SHA2", "EverCrypt_TargetConfig", "EverCrypt", "Vale", "EverCrypt_Hash", "Hacl_Chacha20", "Hacl_Salsa20", "Hacl_IntTypes_Intrinsics", "Hacl_Bignum_Base", "Hacl_Bignum", "Hacl_Bignum25519_51", "Hacl_Curve25519_51", "Hacl_Ed25519_PrecompTable", "Hacl_Ed25519", "Hacl_Poly1305_32", "Hacl_NaCl", "Hacl_P256_PrecompTable", "Hacl_P256", "Hacl_Bignum_K256", "Hacl_K256_PrecompTable", "Hacl_K256_ECDSA", "Hacl_HMAC", "Hacl_HKDF", "Hacl_Chacha20Poly1305_32", "Hacl_HPKE_Curve51_CP32_SHA256", "Hacl_HPKE_Curve51_CP32_SHA512", "Hacl_Streaming_Blake2b_256", "Hacl_Streaming_Blake2s_128", "Hacl_GenericField32", "Hacl_SHA2_Vec256", "Hacl_EC_K256", "Hacl_Bignum4096", "Hacl_Chacha20_Vec32", "Hacl_Bignum4096_32", "Hacl_HMAC_Blake2s_128", "Hacl_HKDF_Blake2s_128", "Hacl_GenericField64", "Hacl_Bignum32", "Hacl_Bignum256_32", "Hacl_SHA2_Vec128", "Hacl_Streaming_Poly1305_32", "Hacl_HMAC_DRBG", "Hacl_Streaming_Blake2", "Hacl_Bignum64", "Hacl_HMAC_Blake2b_256", "Hacl_HKDF_Blake2b_256", "Hacl_EC_Ed25519", "Hacl_Bignum256"];
+var my_modules = ["WasmSupport", "FStar", "LowStar_Endianness", "Hacl_Impl_Blake2_Constants", "Hacl_Lib", "Hacl_Hash_Blake2b", "Hacl_Hash_Blake2s", "Hacl_Hash_Blake2b_Simd256", "Hacl_Hash_Blake2s_Simd128", "Hacl_Hash_Base", "Hacl_Hash_SHA1", "Hacl_Hash_SHA2", "Hacl_HMAC", "Hacl_HMAC_Blake2s_128", "Hacl_HMAC_Blake2b_256", "Hacl_Hash_SHA3", "Hacl_Hash_MD5", "EverCrypt_TargetConfig", "EverCrypt", "Vale", "EverCrypt_Hash", "Hacl_Chacha20", "Hacl_Chacha20_Vec128_Hacl_Chacha20_Vec256", "Hacl_Salsa20", "Hacl_IntTypes_Intrinsics", "Hacl_Bignum_Base", "Hacl_Bignum", "Hacl_Bignum25519_51", "Hacl_Curve25519_51", "Hacl_MAC_Poly1305", "Hacl_AEAD_Chacha20Poly1305", "Hacl_Poly1305_128_Hacl_Poly1305_256_Hacl_Impl_Poly1305", "Hacl_AEAD_Chacha20Poly1305_Simd128", "Hacl_AEAD_Chacha20Poly1305_Simd256", "Hacl_Ed25519_PrecompTable", "Hacl_Ed25519", "Hacl_NaCl", "Hacl_P256_PrecompTable", "Hacl_P256", "Hacl_Bignum_K256", "Hacl_K256_PrecompTable", "Hacl_K256_ECDSA", "Hacl_HKDF", "Hacl_HPKE_Curve51_CP32_SHA256", "Hacl_HPKE_Curve51_CP32_SHA512", "Hacl_GenericField32", "Hacl_SHA2_Vec256", "Hacl_EC_K256", "Hacl_Bignum4096", "Hacl_Chacha20_Vec32", "Hacl_Bignum4096_32", "Hacl_HKDF_Blake2s_128", "Hacl_GenericField64", "Hacl_Bignum32", "Hacl_Bignum256_32", "Hacl_SHA2_Vec128", "Hacl_HMAC_DRBG", "Hacl_Bignum64", "Hacl_HKDF_Blake2b_256", "Hacl_EC_Ed25519", "Hacl_Bignum256"];
 var my_debug = false;
 
 if (typeof module !== "undefined")